Change game quality calculations (#833)
* Change game quality calculations

* Fix newbie matching integration test

* Line break in comment
BlackYps authored Sep 18, 2021
1 parent e2dec4b commit f2e70b6
Showing 4 changed files with 27 additions and 19 deletions.
8 changes: 4 additions & 4 deletions server/config.py
@@ -89,12 +89,12 @@ def __init__(self):
         # Difference of cumulated rating of the teams
         self.MAXIMUM_RATING_IMBALANCE = 500
         # stdev of the ratings of all participating players
-        self.MAXIMUM_RATING_DEVIATION = 750
+        self.MAXIMUM_RATING_DEVIATION = 500
         # Quality bonus for each failed matching attempt per full team
         self.TIME_BONUS = 0.02
-        self.MAXIMUM_TIME_BONUS = 0.4
-        self.NEWBIE_TIME_BONUS = 0.2
-        self.MAXIMUM_NEWBIE_TIME_BONUS = 1.6
+        self.MAXIMUM_TIME_BONUS = 0.2
+        self.NEWBIE_TIME_BONUS = 0.9
+        self.MAXIMUM_NEWBIE_TIME_BONUS = 2.7
 
         self.TWILIO_ACCOUNT_SID = ""
         self.TWILIO_TOKEN = ""
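The new values change how quickly the two bonuses saturate. A minimal standalone sketch (the capped_bonus helper is hypothetical; it assumes the bonuses accrue linearly with failed matching attempts, as the config comment states, and ignores the per-search newbie scaling applied in team_matchmaker.py below):

# Values from the updated server/config.py
TIME_BONUS = 0.02
MAXIMUM_TIME_BONUS = 0.2
NEWBIE_TIME_BONUS = 0.9
MAXIMUM_NEWBIE_TIME_BONUS = 2.7

def capped_bonus(failed_attempts: int, per_attempt: float, maximum: float) -> float:
    # Linear accrual per failed matching attempt, clamped at the configured maximum.
    return min(failed_attempts * per_attempt, maximum)

# The regular time bonus now saturates after 10 failed attempts (0.2 / 0.02),
# while the much larger newbie bonus saturates after only 3 (2.7 / 0.9).
print(capped_bonus(20, TIME_BONUS, MAXIMUM_TIME_BONUS))               # 0.2
print(capped_bonus(4, NEWBIE_TIME_BONUS, MAXIMUM_NEWBIE_TIME_BONUS))  # 2.7

Relative to the old values, the regular bonus cap is halved while both newbie bonuses grow, so waiting newbies now weigh more heavily in the quality score.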
18 changes: 10 additions & 8 deletions server/matchmaker/algorithm/team_matchmaker.py
@@ -1,6 +1,7 @@
 import logging
 import statistics
 from collections import defaultdict
+from math import sqrt
 from typing import Dict, Iterable, List, NamedTuple, Set, Tuple
 
 from sortedcontainers import SortedList
@@ -281,17 +282,18 @@ def assign_game_quality(self, match: Match, team_size: int) -> GameCandidate:
                 newbie_bonus += min(search_newbie_bonus, config.MAXIMUM_NEWBIE_TIME_BONUS * num_newbies / team_size)
 
         rating_disparity = abs(match[0].cumulative_rating - match[1].cumulative_rating)
-        fairness = 1 - (rating_disparity / config.MAXIMUM_RATING_IMBALANCE)
+        unfairness = rating_disparity / config.MAXIMUM_RATING_IMBALANCE
         deviation = statistics.pstdev(ratings)
-        uniformity = 1 - (deviation / config.MAXIMUM_RATING_DEVIATION)
+        rating_variety = deviation / config.MAXIMUM_RATING_DEVIATION
 
-        quality = fairness * uniformity
-        if fairness < 0 and uniformity < 0:
-            quality *= -1
-        quality += newbie_bonus + time_bonus
+        # Visually this creates a cone in the unfairness-rating_variety plane
+        # that slowly rises with the time bonuses.
+        quality = 1 - sqrt(unfairness ** 2 + rating_variety ** 2) + time_bonus
+        if not any(team.has_top_player() for team in match):
+            quality += newbie_bonus
         self._logger.debug(
-            "bonuses: %s rating disparity: %s -> fairness: %f deviation: %f -> uniformity: %f -> game quality: %f",
-            newbie_bonus + time_bonus, rating_disparity, fairness, deviation, uniformity, quality)
+            "bonuses: %s rating disparity: %s -> unfairness: %f deviation: %f -> variety: %f -> game quality: %f",
+            newbie_bonus + time_bonus, rating_disparity, unfairness, deviation, rating_variety, quality)
         return GameCandidate(match, quality)
 
     def pick_noncolliding_games(self, games: List[GameCandidate]) -> List[Match]:
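With this change, unfairness and rating_variety act as coordinates: quality is 1 minus the Euclidean distance from the ideal match, plus the time bonuses. The old fairness * uniformity product needed an explicit sign flip when both factors went negative; the distance form simply keeps falling as either imbalance or spread grows. A minimal standalone sketch of the new formula (game_quality and the has_top_player flag are stand-ins here, not the server's TeamMatchMaker API; the constants are the updated config values):

from math import sqrt

MAXIMUM_RATING_IMBALANCE = 500   # config values after this commit
MAXIMUM_RATING_DEVIATION = 500

def game_quality(rating_disparity: float, rating_stdev: float,
                 time_bonus: float = 0.0, newbie_bonus: float = 0.0,
                 has_top_player: bool = False) -> float:
    # Normalize both axes against their configured maxima.
    unfairness = rating_disparity / MAXIMUM_RATING_IMBALANCE
    rating_variety = rating_stdev / MAXIMUM_RATING_DEVIATION
    # Inverted cone: distance from a perfectly fair, perfectly uniform match,
    # lifted by the time bonus as matching attempts fail.
    quality = 1 - sqrt(unfairness ** 2 + rating_variety ** 2) + time_bonus
    # The newbie bonus is withheld whenever a top player is involved.
    if not has_top_player:
        quality += newbie_bonus
    return quality

print(game_quality(0, 0))        # 1.0, the tip of the cone
print(game_quality(250, 250))    # 1 - sqrt(0.5**2 + 0.5**2) ≈ 0.29
print(game_quality(600, 0))      # ≈ -0.2, beyond the imbalance cap the score goes negative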
2 changes: 1 addition & 1 deletion tests/data/test-data.sql
@@ -132,7 +132,7 @@ insert into leaderboard_rating (login_id, mean, deviation, total_games, leaderbo
 (102, 1500, 500, 0, 1),
 (102, 1500, 500, 0, 2),
 (105, 1400, 150, 20, 3),
-(106, 1500, 75, 20, 3)
+(106, 900, 75, 20, 3)
 ;
 
 -- legacy table for global rating
18 changes: 12 additions & 6 deletions tests/unit_tests/test_matchmaker_algorithm_team_matchmaker.py
@@ -80,10 +80,10 @@ def test_team_matchmaker_algorithm(player_factory):
 
     matches, unmatched = matchmaker.find(s, 4)
 
-    assert set(matches[0][0].get_original_searches()) == {c1, s[2], s[5]}
-    assert set(matches[0][1].get_original_searches()) == {c3, s[1], s[6]}
-    assert set(matches[1][0].get_original_searches()) == {c4, s[4]}
-    assert set(matches[1][1].get_original_searches()) == {c2, s[0], s[3]}
+    assert set(matches[1][0].get_original_searches()) == {c1, s[2], s[5]}
+    assert set(matches[1][1].get_original_searches()) == {c3, s[1], s[6]}
+    assert set(matches[0][0].get_original_searches()) == {c4, s[4]}
+    assert set(matches[0][1].get_original_searches()) == {c2, s[0], s[3]}
     assert set(unmatched) == {s[7]}
     for match in matches:
         assert matchmaker.assign_game_quality(match, 4).quality > config.MINIMUM_GAME_QUALITY
@@ -259,7 +259,10 @@ def test_game_quality_time_bonus(s):
     team_b.register_failed_matching_attempt()
     quality_after = matchmaker.assign_game_quality((team_a, team_b), 3).quality
 
-    num_newbies = team_a.num_newbies() + team_b.num_newbies()
+    if team_a.has_top_player() or team_b.has_top_player():
+        num_newbies = 0
+    else:
+        num_newbies = team_a.num_newbies() + team_b.num_newbies()
 
     assert (
         quality_before
@@ -283,7 +286,10 @@ def test_game_quality_max_time_bonus(s):
     team_b.register_failed_matching_attempt()
     quality_after = matchmaker.assign_game_quality((team_a, team_b), 3).quality
 
-    num_newbies = team_a.num_newbies() + team_b.num_newbies()
+    if team_a.has_top_player() or team_b.has_top_player():
+        num_newbies = 0
+    else:
+        num_newbies = team_a.num_newbies() + team_b.num_newbies()
 
     assert (
         quality_before
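These updated expectations mirror the production guard added above: the newbie bonus only counts when neither team contains a top player. A tiny stand-in with plain values instead of the real Team objects (expected_num_newbies is hypothetical, used only for illustration):

def expected_num_newbies(newbies_a: int, newbies_b: int, any_top_player: bool) -> int:
    # Matches the updated test logic: no newbie bonus contributes to the
    # expected quality delta when either team contains a top player.
    return 0 if any_top_player else newbies_a + newbies_b

print(expected_num_newbies(2, 1, False))  # 3
print(expected_num_newbies(2, 1, True))   # 0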
