From f2e70b6604383e003c18844199ce1bd1fe263556 Mon Sep 17 00:00:00 2001 From: BlackYps <52536103+BlackYps@users.noreply.github.com> Date: Sat, 18 Sep 2021 22:42:14 +0200 Subject: [PATCH] Change game quality calculations (#833) * Change game quality calculations * Fix newbie matching integration test * Line break in comment --- server/config.py | 8 ++++---- server/matchmaker/algorithm/team_matchmaker.py | 18 ++++++++++-------- tests/data/test-data.sql | 2 +- ...est_matchmaker_algorithm_team_matchmaker.py | 18 ++++++++++++------ 4 files changed, 27 insertions(+), 19 deletions(-) diff --git a/server/config.py b/server/config.py index a6f2744d3..bfd090805 100644 --- a/server/config.py +++ b/server/config.py @@ -89,12 +89,12 @@ def __init__(self): # Difference of cumulated rating of the teams self.MAXIMUM_RATING_IMBALANCE = 500 # stdev of the ratings of all participating players - self.MAXIMUM_RATING_DEVIATION = 750 + self.MAXIMUM_RATING_DEVIATION = 500 # Quality bonus for each failed matching attempt per full team self.TIME_BONUS = 0.02 - self.MAXIMUM_TIME_BONUS = 0.4 - self.NEWBIE_TIME_BONUS = 0.2 - self.MAXIMUM_NEWBIE_TIME_BONUS = 1.6 + self.MAXIMUM_TIME_BONUS = 0.2 + self.NEWBIE_TIME_BONUS = 0.9 + self.MAXIMUM_NEWBIE_TIME_BONUS = 2.7 self.TWILIO_ACCOUNT_SID = "" self.TWILIO_TOKEN = "" diff --git a/server/matchmaker/algorithm/team_matchmaker.py b/server/matchmaker/algorithm/team_matchmaker.py index 8eed23aac..5ffff56bf 100644 --- a/server/matchmaker/algorithm/team_matchmaker.py +++ b/server/matchmaker/algorithm/team_matchmaker.py @@ -1,6 +1,7 @@ import logging import statistics from collections import defaultdict +from math import sqrt from typing import Dict, Iterable, List, NamedTuple, Set, Tuple from sortedcontainers import SortedList @@ -281,17 +282,18 @@ def assign_game_quality(self, match: Match, team_size: int) -> GameCandidate: newbie_bonus += min(search_newbie_bonus, config.MAXIMUM_NEWBIE_TIME_BONUS * num_newbies / team_size) rating_disparity = 
abs(match[0].cumulative_rating - match[1].cumulative_rating) - fairness = 1 - (rating_disparity / config.MAXIMUM_RATING_IMBALANCE) + unfairness = rating_disparity / config.MAXIMUM_RATING_IMBALANCE deviation = statistics.pstdev(ratings) - uniformity = 1 - (deviation / config.MAXIMUM_RATING_DEVIATION) + rating_variety = deviation / config.MAXIMUM_RATING_DEVIATION - quality = fairness * uniformity - if fairness < 0 and uniformity < 0: - quality *= -1 - quality += newbie_bonus + time_bonus + # Visually this creates a cone in the unfairness-rating_variety plane + # that slowly rises with the time bonuses. + quality = 1 - sqrt(unfairness ** 2 + rating_variety ** 2) + time_bonus + if not any(team.has_top_player() for team in match): + quality += newbie_bonus self._logger.debug( - "bonuses: %s rating disparity: %s -> fairness: %f deviation: %f -> uniformity: %f -> game quality: %f", - newbie_bonus + time_bonus, rating_disparity, fairness, deviation, uniformity, quality) + "bonuses: %s rating disparity: %s -> unfairness: %f deviation: %f -> variety: %f -> game quality: %f", + newbie_bonus + time_bonus, rating_disparity, unfairness, deviation, rating_variety, quality) return GameCandidate(match, quality) def pick_noncolliding_games(self, games: List[GameCandidate]) -> List[Match]: diff --git a/tests/data/test-data.sql b/tests/data/test-data.sql index dbdb64b4a..542c3703f 100644 --- a/tests/data/test-data.sql +++ b/tests/data/test-data.sql @@ -132,7 +132,7 @@ insert into leaderboard_rating (login_id, mean, deviation, total_games, leaderbo (102, 1500, 500, 0, 1), (102, 1500, 500, 0, 2), (105, 1400, 150, 20, 3), - (106, 1500, 75, 20, 3) + (106, 900, 75, 20, 3) ; -- legacy table for global rating diff --git a/tests/unit_tests/test_matchmaker_algorithm_team_matchmaker.py b/tests/unit_tests/test_matchmaker_algorithm_team_matchmaker.py index 72f78075c..ca366a0c3 100644 --- a/tests/unit_tests/test_matchmaker_algorithm_team_matchmaker.py +++ 
b/tests/unit_tests/test_matchmaker_algorithm_team_matchmaker.py @@ -80,10 +80,10 @@ def test_team_matchmaker_algorithm(player_factory): matches, unmatched = matchmaker.find(s, 4) - assert set(matches[0][0].get_original_searches()) == {c1, s[2], s[5]} - assert set(matches[0][1].get_original_searches()) == {c3, s[1], s[6]} - assert set(matches[1][0].get_original_searches()) == {c4, s[4]} - assert set(matches[1][1].get_original_searches()) == {c2, s[0], s[3]} + assert set(matches[1][0].get_original_searches()) == {c1, s[2], s[5]} + assert set(matches[1][1].get_original_searches()) == {c3, s[1], s[6]} + assert set(matches[0][0].get_original_searches()) == {c4, s[4]} + assert set(matches[0][1].get_original_searches()) == {c2, s[0], s[3]} assert set(unmatched) == {s[7]} for match in matches: assert matchmaker.assign_game_quality(match, 4).quality > config.MINIMUM_GAME_QUALITY @@ -259,7 +259,10 @@ def test_game_quality_time_bonus(s): team_b.register_failed_matching_attempt() quality_after = matchmaker.assign_game_quality((team_a, team_b), 3).quality - num_newbies = team_a.num_newbies() + team_b.num_newbies() + if team_a.has_top_player() or team_b.has_top_player(): + num_newbies = 0 + else: + num_newbies = team_a.num_newbies() + team_b.num_newbies() assert ( quality_before @@ -283,7 +286,10 @@ def test_game_quality_max_time_bonus(s): team_b.register_failed_matching_attempt() quality_after = matchmaker.assign_game_quality((team_a, team_b), 3).quality - num_newbies = team_a.num_newbies() + team_b.num_newbies() + if team_a.has_top_player() or team_b.has_top_player(): + num_newbies = 0 + else: + num_newbies = team_a.num_newbies() + team_b.num_newbies() assert ( quality_before