diff --git a/docs/ClientWrapperUsage.md b/docs/ClientWrapperUsage.md
index ed21fa4..99a632d 100644
--- a/docs/ClientWrapperUsage.md
+++ b/docs/ClientWrapperUsage.md
@@ -44,3 +44,39 @@ data[0]
 filtered_data = [{'acc': item['acc'], 'stars': item['stars'], 'hash': item['hash']} for item in data]
 filtered_data[0]
 ```
+
+## SimpleBeatSaverAPI
+
+```python
+from helpers.SimpleBeatSaverAPI import SimpleBeatSaverAPI
+beat_saver_api = SimpleBeatSaverAPI()
+
+curated_songs = beat_saver_api.get_curated_songs(use_cache=False)
+mapper_maps = beat_saver_api.get_mapper_maps(mapper_id=4285738, use_cache=False)
+```
+
+## ScoreSaberAPI
+
+```python
+from helpers.ScoreSaberAPI import ScoreSaberAPI
+from clients.scoresaber.models.get_api_player_player_id_scores_sort import GetApiPlayerPlayerIdScoresSort
+
+# Instantiate the API client
+scoresaber_api = ScoreSaberAPI()
+
+# Specify the player ID you want to fetch scores for
+player_id = "76561199407393962"
+
+# Fetch player scores
+scores_data = scoresaber_api.get_player_scores(
+    player_id=player_id,
+    use_cache=True, # Use cached data if available
+    limit=100, # Number of scores per page
+    sort=GetApiPlayerPlayerIdScoresSort.RECENT, # Sort by most recent scores
+    max_pages=2 # Maximum number of pages to fetch
+)
+ranked_scores = [score for score in scores_data.get('playerScores', []) if score['leaderboard']['stars'] != 0]
+
+print(f"Got {len(scores_data.get('playerScores', []))} scores for player {player_id}")
+print(f"Got {len(ranked_scores)} ranked scores for player {player_id}")
+```
diff --git a/docs/Strategies.md b/docs/Strategies.md
new file mode 100644
index 0000000..374d059
--- /dev/null
+++ b/docs/Strategies.md
@@ -0,0 +1,7 @@
+# Playlist Strategies
+
+## Accuracy Gaps
+
+Prioritizes songs that are furthest below the median accuracy for the player.
+ +![Accuracy Gaps](images/accuracy-gaps.png) diff --git a/docs/images/accuracy-gaps.png b/docs/images/accuracy-gaps.png new file mode 100644 index 0000000..848b3dc Binary files /dev/null and b/docs/images/accuracy-gaps.png differ diff --git a/src/saberlist/make.py b/src/saberlist/make.py index 1b51fbe..8df77e6 100644 --- a/src/saberlist/make.py +++ b/src/saberlist/make.py @@ -22,7 +22,7 @@ from helpers.SimpleBeatSaverAPI import SimpleBeatSaverAPI from saberlist.utils import reset_history from saberlist.playlist_strategies.oldscores import playlist_strategy_beatleader_oldscores, playlist_strategy_scoresaber_oldscores -from saberlist.playlist_strategies.accuracy import playlist_strategy_beatleader_lowest_acc, playlist_strategy_beatleader_accuracy_gaps +from saberlist.playlist_strategies.accuracy import playlist_strategy_beatleader_lowest_acc, playlist_strategy_beatleader_accuracy_gaps, playlist_strategy_scoresaber_accuracy_gaps from saberlist.playlist_strategies.performance import playlist_strategy_beatleader_lowest_pp, playlist_strategy_scoresaber_lowest_pp from saberlist.playlist_strategies.beatsaver import playlist_strategy_beatsaver_acc, playlist_strategy_beatsaver_curated, playlist_strategy_beatsaver_mappers @@ -54,6 +54,9 @@ def saberlist() -> None: elif strategy == 'beatleader_accuracy_gaps': playlist_data, playlist_title = playlist_strategy_beatleader_accuracy_gaps(SimpleBeatLeaderAPI(cache_expiry_days=CACHE_EXPIRY_DAYS)) playlist_builder = PlaylistBuilder(covers_dir='./covers/pajamas') + elif strategy == 'scoresaber_accuracy_gaps': + playlist_data, playlist_title = playlist_strategy_scoresaber_accuracy_gaps(ScoreSaberAPI(cache_expiry_days=CACHE_EXPIRY_DAYS)) + playlist_builder = PlaylistBuilder(covers_dir='./covers/scoresaber') elif strategy == 'beatsaver_curated': playlist_data, playlist_title = playlist_strategy_beatsaver_curated(SimpleBeatSaverAPI()) playlist_builder = PlaylistBuilder(covers_dir='./covers/curated') @@ -80,11 +83,12 @@ def get_strategy(): 
choices=[ "scoresaber_oldscores", "beatleader_oldscores", - "beatsaver_acc", + # "beatsaver_acc", # "beatleader_lowest_pp", # "scoresaber_lowest_pp", # "beatleader_lowest_acc", "beatleader_accuracy_gaps", + "scoresaber_accuracy_gaps", "beatsaver_curated", "beatsaver_mappers" ], diff --git a/src/saberlist/playlist_strategies/accuracy.py b/src/saberlist/playlist_strategies/accuracy.py index c0191e0..a2c5431 100644 --- a/src/saberlist/playlist_strategies/accuracy.py +++ b/src/saberlist/playlist_strategies/accuracy.py @@ -16,9 +16,170 @@ logging.basicConfig( ) from helpers.SimpleBeatLeaderAPI import SimpleBeatLeaderAPI +from helpers.BeatLeaderAPI import BeatLeaderAPI from saberlist.utils import prompt_for_player_id, load_history, save_history, normalize_difficulty_name + +from helpers.ScoreSaberAPI import ScoreSaberAPI +from clients.scoresaber.models.get_api_player_player_id_scores_sort import GetApiPlayerPlayerIdScoresSort + +"""Testing +api = ScoreSaberAPI() +song_count = 40 +bin_size = 0.25 +bin_sort = False +""" +def playlist_strategy_scoresaber_accuracy_gaps( + api: ScoreSaberAPI, + song_count: int = 40, + bin_size: float = 0.25, + bin_sort: bool = False +) -> List[Dict[str, Any]]: + """ + Build a playlist of songs where the player's accuracy is furthest below the median accuracy + for their star rating range. Songs are grouped into bins by star rating to ensure fair comparison. 
+ + :param api: ScoreSaberAPI instance for making API calls + :param song_count: Number of songs to include in the playlist + :param bin_size: Size of star rating bins for grouping similar difficulty songs + :param bin_sort: Whether to sort the bins by star rating + :return: A tuple containing (list of song dictionaries, playlist title string) + """ + player_id = prompt_for_player_id() + history = load_history() + history.setdefault('scoresaber_accuracy_gaps', {}) + history.setdefault('playlist_counts', {}) + + # Get the current count and increment it + count_key = 'scoresaber_accuracy_gaps' + current_count = history['playlist_counts'].get(count_key, 0) + new_count = current_count + 1 + history['playlist_counts'][count_key] = new_count + + # Fetch player scores + scores_data = api.get_player_scores( + player_id=player_id, + use_cache=True, + limit=100, # per page + sort=GetApiPlayerPlayerIdScoresSort.RECENT + ) + ranked_scores = [score for score in scores_data.get('playerScores', []) + if score.get('leaderboard', {}).get('stars', 0) != 0] + + if not ranked_scores: + logging.warning(f"No ranked scores found for player ID {player_id} on ScoreSaber.") + return [], "" + logging.debug(f"Found {len(ranked_scores)} ranked scores for player ID {player_id} on ScoreSaber.") + + # Get min and max star ratings + min_stars = min(score['leaderboard']['stars'] for score in ranked_scores) + max_stars = max(score['leaderboard']['stars'] for score in ranked_scores) + star_range = max_stars - min_stars + + # Determine number of bins + num_bins = math.ceil(star_range / bin_size) + logging.info(f"Using bin size: {bin_size}, resulting in {num_bins} bins.") + + # Group accuracies by bins + bin_to_accuracies = defaultdict(list) + for score in ranked_scores: + # Calculate accuracy + try: + modified_score = score['score']['modifiedScore'] + max_score = score['leaderboard']['maxScore'] + accuracy = modified_score / max_score if max_score else 0 + score['accuracy'] = accuracy + except 
Exception as e:
+            logging.error(f"Error calculating accuracy for score {score}: {e}")
+            continue
+
+        stars = score['leaderboard'].get('stars')
+        if stars is not None and accuracy is not None:
+            bin_index = int((stars - min_stars) / bin_size)
+            bin_to_accuracies[bin_index].append(accuracy)
+
+    # Calculate median accuracy for each bin
+    bin_to_median = {}
+    for bin_index, accuracies in bin_to_accuracies.items():
+        bin_to_median[bin_index] = median(accuracies)
+        bin_start = min_stars + bin_index * bin_size
+        bin_end = bin_start + bin_size
+        logging.debug(f"Median accuracy for bin {bin_index} (stars {bin_start:.2f} to {bin_end:.2f}): {bin_to_median[bin_index]:.4f}")
+
+    # Compute difference from median for each score
+    for score in ranked_scores:
+        stars = score['leaderboard'].get('stars')
+        accuracy = score.get('accuracy')
+        if stars is not None and accuracy is not None:
+            bin_index = int((stars - min_stars) / bin_size)
+            median_acc = bin_to_median.get(bin_index)
+            if median_acc is not None:
+                score['diff_from_median'] = accuracy - median_acc
+            else:
+                score['diff_from_median'] = float('inf') # Place entries with missing data at the end
+        else:
+            score['diff_from_median'] = float('inf') # Place entries with missing data at the end
+
+    # Sort scores by difference from median (ascending: most below median first)
+    ranked_scores.sort(key=lambda x: x.get('diff_from_median', float('inf')))
+
+    playlist_data = []
+    for score in ranked_scores:
+        if len(playlist_data) >= song_count:
+            break
+
+        accuracy = score.get('accuracy', 0) # computed above and stored on the score dict itself, not under score['score']
+        stars = score['leaderboard'].get('stars')
+        song_hash = score['leaderboard'].get('songHash')
+
+        if not song_hash or stars is None:
+            logging.debug(f"Skipping score due to missing hash or stars: {score}")
+            continue
+
+        difficulty_raw = score['leaderboard']['difficulty'].get('difficultyRaw', '')
+        game_mode = score['leaderboard']['difficulty'].get('gameMode', 'Standard')
+        game_mode = game_mode.replace('Solo', '') # Remove prefix 'Solo' 
from the game mode + difficulty = normalize_difficulty_name(difficulty_raw) + + # Avoid reusing song+difficulty + if song_hash in history['scoresaber_accuracy_gaps'] and difficulty in history['scoresaber_accuracy_gaps'][song_hash]: + logging.debug(f"Skipping song {song_hash} with difficulty {difficulty} as it's in history.") + continue + + song_dict = { + 'hash': song_hash, + 'difficulties': [ + { + 'name': difficulty, + 'characteristic': game_mode + } + ] + } + + playlist_data.append(song_dict) + song_name = score['leaderboard']['songName'] + song_artist = score['leaderboard']['songAuthorName'] + logging.debug(f"Selected song for playlist: Name={song_name}, Artist={song_artist}, " + f"Accuracy={accuracy*100:.2f}%, Diff from Median={score['diff_from_median']*100:.2f}%") + + # Update history + history['scoresaber_accuracy_gaps'].setdefault(song_hash, []).append(difficulty) + + if not playlist_data: + logging.info("No new songs found to add to the playlist based on history for ScoreSaber accuracy gaps.") + else: + for song in playlist_data: + song_hash = song['hash'] + difficulty = song['difficulties'][0]['name'] + logging.info(f"Song added: Hash={song_hash}, Difficulty={difficulty}") + logging.info(f"Total songs added to playlist from ScoreSaber accuracy gaps: {len(playlist_data)}") + + save_history(history) + playlist_title = f"scoresaber_accgraph-{new_count:02d}" + + return playlist_data, playlist_title + def playlist_strategy_beatleader_accuracy_gaps( api: SimpleBeatLeaderAPI, song_count: int = 40,