Skip to content

add traffic allocation support #56

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 5 commits into from
May 5, 2017
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
15 changes: 14 additions & 1 deletion splitio/clients.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@
from splitio.splitters import Splitter
from splitio.splits import (SelfRefreshingSplitFetcher, SplitParser, ApiSplitChangeFetcher,
JSONFileSplitFetcher, InMemorySplitFetcher, AllKeysSplit,
CacheBasedSplitFetcher)
CacheBasedSplitFetcher, ConditionType)
from splitio.segments import (ApiSegmentChangeFetcher, SelfRefreshingSegmentFetcher,
JSONFileSegmentFetcher)
from splitio.config import DEFAULT_CONFIG, MAX_INTERVAL, parse_config_file
Expand Down Expand Up @@ -175,7 +175,20 @@ def _get_treatment_for_split(self, split, matching_key, bucketing_key, attribute
if bucketing_key is None:
bucketing_key = matching_key

roll_out = False
for condition in split.conditions:
if (not roll_out and
condition.condition_type == ConditionType.ROLLOUT):
if split.traffic_allocation < 100:
bucket = self.get_splitter().get_bucket(
bucketing_key,
split.traffic_allocation_seed,
split.algo
)
if bucket >= split.traffic_allocation:
return split.default_treatment, Label.NOT_IN_SPLIT
roll_out = True

if condition.matcher.match(matching_key, attributes=attributes):
return self.get_splitter().get_treatment(
bucketing_key,
Expand Down
5 changes: 5 additions & 0 deletions splitio/impressions.py
Original file line number Diff line number Diff line change
Expand Up @@ -56,6 +56,11 @@ class Label(object):
#Label: split not found
SPLIT_NOT_FOUND = 'rules not found'

# Condition: Traffic allocation failed
# Treatment: Default Treatment
# Label: not in split
NOT_IN_SPLIT = 'not in split'

# Condition: There was an exception
# Treatment: control
# Label: exception
Expand Down
15 changes: 10 additions & 5 deletions splitio/redis_support.py
Original file line number Diff line number Diff line change
Expand Up @@ -651,7 +651,9 @@ def _parse_split(self, split, block_until_ready=False):
split['defaultTreatment'], split['trafficTypeName'],
split['status'], split['changeNumber'],
segment_cache=self._segment_cache,
algo=split.get('algo')
algo=split.get('algo'),
traffic_allocation=split.get('trafficAllocation'),
traffic_allocation_seed=split.get('trafficAllocationSeed')
)

def _parse_matcher_in_segment(self, partial_split, matcher, block_until_ready=False, *args,
Expand All @@ -666,7 +668,8 @@ def _parse_matcher_in_segment(self, partial_split, matcher, block_until_ready=Fa
class RedisSplit(Split):
def __init__(self, name, seed, killed, default_treatment, traffic_type_name,
status, change_number, conditions=None, segment_cache=None,
algo=None):
algo=None, traffic_allocation=None,
traffic_allocation_seed=None):
'''
A split implementation that maintains a reference to the segment cache
so segments can be easily pickled and unpickled.
Expand All @@ -683,9 +686,11 @@ def __init__(self, name, seed, killed, default_treatment, traffic_type_name,
:param segment_cache: A segment cache
:type segment_cache: SegmentCache
'''
super(RedisSplit, self).__init__(name, seed, killed, default_treatment,
traffic_type_name, status,
change_number, conditions, algo)
super(RedisSplit, self).__init__(
name, seed, killed, default_treatment, traffic_type_name, status,
change_number, conditions, algo, traffic_allocation,
traffic_allocation_seed
)
self._segment_cache = segment_cache

@property
Expand Down
63 changes: 56 additions & 7 deletions splitio/splits.py
Original file line number Diff line number Diff line change
Expand Up @@ -38,9 +38,18 @@ class HashAlgorithm(Enum):
MURMUR = 2


class ConditionType(Enum):
    """Kinds of conditions a split definition may contain.

    The enum values mirror the ``conditionType`` strings delivered by the
    backend, so members can be looked up directly with
    ``ConditionType(raw_string)``.
    """

    # Condition evaluated unconditionally (explicit whitelist rules).
    WHITELIST = 'WHITELIST'
    # Condition gated by the split's traffic allocation before matching.
    ROLLOUT = 'ROLLOUT'


class Split(object):
def __init__(self, name, seed, killed, default_treatment, traffic_type_name,
status, change_number, conditions=None, algo=None):
status, change_number, conditions=None, algo=None,
traffic_allocation=None, traffic_allocation_seed=None):
"""
A class that represents a split. It associates a feature name with a set
of matchers (responsible of telling which condition to use) and
Expand All @@ -64,6 +73,13 @@ def __init__(self, name, seed, killed, default_treatment, traffic_type_name,
self._status = status
self._change_number = change_number
self._conditions = conditions if conditions is not None else []

if traffic_allocation >= 0 and traffic_allocation <= 100:
self._traffic_allocation = traffic_allocation
else:
self._traffic_allocation = 100

self._traffic_allocation_seed = traffic_allocation_seed
try:
self._algo = HashAlgorithm(algo)
except ValueError:
Expand Down Expand Up @@ -105,6 +121,14 @@ def change_number(self):
def conditions(self):
return self._conditions

@property
def traffic_allocation(self):
    """Percentage (0-100) of traffic exposed to ROLLOUT conditions.

    Values outside the 0-100 range are normalized to 100 at
    construction time, so this is always a valid percentage.
    """
    return self._traffic_allocation

@property
def traffic_allocation_seed(self):
    """Seed used to bucket keys when applying traffic allocation.

    May be ``None`` when the raw split definition carried no
    ``trafficAllocationSeed`` field (it is stored as received).
    """
    return self._traffic_allocation_seed

@python_2_unicode_compatible
def __str__(self):
return 'name: {name}, seed: {seed}, killed: {killed}, ' \
Expand Down Expand Up @@ -133,7 +157,8 @@ def __init__(self, name, treatment):


class Condition(object):
def __init__(self, matcher, partitions, label):
def __init__(self, matcher, partitions, label,
condition_type=ConditionType.WHITELIST):
"""
A class that represents a split condition. It associates a matcher with
a set of partitions.
Expand All @@ -145,6 +170,7 @@ def __init__(self, matcher, partitions, label):
self._matcher = matcher
self._partitions = tuple(partitions)
self._label = label
self._confition_type = condition_type

@property
def matcher(self):
Expand All @@ -158,6 +184,10 @@ def partitions(self):
def label(self):
return self._label

@property
def condition_type(self):
    """Return this condition's type (a ``ConditionType`` member).

    NOTE(review): the backing attribute ``_confition_type`` is a typo of
    ``_condition_type``; renaming it requires touching ``__init__`` in
    the same class, so it is left as-is here.
    """
    return self._confition_type

@python_2_unicode_compatible
def __str__(self):
return '{matcher} then split {partitions}'.format(
Expand Down Expand Up @@ -603,10 +633,18 @@ def _parse_split(self, split, block_until_ready=False):
:return: A partial parsed split
:rtype: Split
"""
return Split(split['name'], split['seed'], split['killed'],
split['defaultTreatment'], split['trafficTypeName'],
split['status'], split['changeNumber'],
algo=split.get('algo'))
return Split(
split['name'],
split['seed'],
split['killed'],
split['defaultTreatment'],
split['trafficTypeName'],
split['status'],
split['changeNumber'],
algo=split.get('algo'),
traffic_allocation=split.get('trafficAllocation'),
traffic_allocation_seed=split.get('trafficAllocationSeed')
)

def _parse_conditions(self, partial_split, split, block_until_ready=False):
"""Parse split conditions
Expand All @@ -630,8 +668,19 @@ def _parse_conditions(self, partial_split, split, block_until_ready=False):
label = None
if 'label' in condition:
label = condition['label']

try:
condition_type = ConditionType(condition.get('conditionType'))
except:
condition_type = ConditionType.WHITELIST

partial_split.conditions.append(
Condition(combining_matcher, parsed_partitions, label)
Condition(
combining_matcher,
parsed_partitions,
label,
condition_type
)
)

def _parse_matcher_group(self, partial_split, matcher_group,
Expand Down
7 changes: 4 additions & 3 deletions splitio/splitters.py
Original file line number Diff line number Diff line change
Expand Up @@ -30,20 +30,21 @@ def get_treatment(self, key, seed, partitions, algo):
if len(partitions) == 1 and partitions[0].size == 100:
return partitions[0].treatment

hashfn = get_hash_fn(algo)
return self.get_treatment_for_bucket(
self.get_bucket(hashfn(key, seed)),
self.get_bucket(key, seed, algo),
partitions
)

def get_bucket(self, key_hash):
def get_bucket(self, key, seed, algo):
    """
    Hash a key with the given seed and algorithm, then map the hash to
    a bucket.
    :param key: The (bucketing) key to hash
    :type key: str
    :param seed: The seed of the split
    :type seed: int
    :param algo: The hashing algorithm to use
    :type algo: HashAlgorithm
    :return: The bucket for the key, in the range 1-100
    :rtype: int
    """
    hashfn = get_hash_fn(algo)
    key_hash = hashfn(key, seed)
    return abs(key_hash) % 100 + 1

def get_treatment_for_bucket(self, bucket, partitions):
Expand Down
2 changes: 2 additions & 0 deletions splitio/tests/test_redis_support.py
Original file line number Diff line number Diff line change
Expand Up @@ -574,6 +574,8 @@ def test_parse_split_calls_redis_split_constructor(self):
self.some_split['name'], self.some_split['seed'], self.some_split['killed'],
self.some_split['defaultTreatment'],self.some_split['trafficTypeName'],
self.some_split['status'], self.some_split['changeNumber'], segment_cache=self.some_segment_cache,
traffic_allocation=self.some_split.get('trafficAllocation'),
traffic_allocation_seed=self.some_split.get('trafficAllocationSeed'),
algo=self.some_split['algo']
)

Expand Down
99 changes: 93 additions & 6 deletions splitio/tests/test_splits.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@
import json
from splitio.splits import (InMemorySplitFetcher, SelfRefreshingSplitFetcher, SplitChangeFetcher,
ApiSplitChangeFetcher, SplitParser, AllKeysSplit,
CacheBasedSplitFetcher, HashAlgorithm)
CacheBasedSplitFetcher, HashAlgorithm, ConditionType)
from splitio.matchers import (AndCombiner, AllKeysMatcher, UserDefinedSegmentMatcher,
WhitelistMatcher, AttributeMatcher)
from splitio.tests.utils import MockUtilsMixin
Expand All @@ -21,7 +21,7 @@
from splitio.hashfns.legacy import legacy_hash
from splitio.redis_support import get_redis, RedisSegmentCache, RedisSplitParser
from splitio.uwsgi import get_uwsgi, UWSGISegmentCache, UWSGISplitParser

from splitio.clients import RedisClient

class InMemorySplitFetcherTests(TestCase):
def setUp(self):
Expand Down Expand Up @@ -505,10 +505,23 @@ def test_creates_condition_on_each_condition(self):
self.parser._parse(self.some_split)

self.assertListEqual(
[mock.call(self.parse_matcher_group_mock_side_effect[0],
[self.partition_mock_side_effect[0]], self.label_0),
mock.call(self.parse_matcher_group_mock_side_effect[1],
[self.partition_mock_side_effect[1], self.partition_mock_side_effect[2]], self.label_1)],
[
mock.call(
self.parse_matcher_group_mock_side_effect[0],
[self.partition_mock_side_effect[0]],
self.label_0,
ConditionType.WHITELIST
),
mock.call(
self.parse_matcher_group_mock_side_effect[1],
[
self.partition_mock_side_effect[1],
self.partition_mock_side_effect[2]
],
self.label_1,
ConditionType.WHITELIST
)
],
self.condition_mock.call_args_list
)

Expand Down Expand Up @@ -993,3 +1006,77 @@ def testAlgoHandlers(self):
split = split_parser.parse(sp['body'], True)
self.assertEqual(split.algo, sp['algo'])
self.assertEqual(get_hash_fn(split.algo), sp['hashfn'])


class TrafficAllocationTests(TestCase):
    '''
    Integration tests for traffic-allocation handling in
    RedisClient._get_treatment_for_split. Requires a reachable Redis
    instance (uses get_redis with default config).
    '''

    def setUp(self):
        '''
        Parse three variants of the same raw split definition:
        - 'whitelist': a WHITELIST condition matching all keys at 100% 'on'.
        - 'rollout1':  same rule as a ROLLOUT condition, with no explicit
          traffic allocation (defaults to 100, so everyone is in the split).
        - 'rollout2':  ROLLOUT with trafficAllocation=1 and seed=-1, so the
          test key is expected to fall outside the allocation.
        Note: raw_split is mutated between parse calls, so statement order
        here is significant.
        '''
        redis = get_redis({})
        segment_cache = RedisSegmentCache(redis)
        split_parser = RedisSplitParser(segment_cache)
        self._client = RedisClient(redis)

        self._splitObjects = {}

        raw_split = {
            'name': 'test1',
            'algo': 1,
            'killed': False,
            'status': 'ACTIVE',
            'defaultTreatment': 'default',
            'seed': -1222652054,
            'orgId': None,
            'environment': None,
            'trafficTypeId': None,
            'trafficTypeName': None,
            'changeNumber': 1,
            'conditions': [{
                'conditionType': 'WHITELIST',
                'matcherGroup': {
                    'combiner': 'AND',
                    'matchers': [{
                        'matcherType': 'ALL_KEYS',
                        'negate': False,
                        'userDefinedSegmentMatcherData': None,
                        'whitelistMatcherData': None
                    }]
                },
                'partitions': [{
                    'treatment': 'on',
                    'size': 100
                }],
                'label': 'in segment all'
            }]
        }
        self._splitObjects['whitelist'] = split_parser.parse(raw_split, True)

        # Same rule, but evaluated as a rollout (default 100% allocation).
        raw_split['name'] = 'test2'
        raw_split['conditions'][0]['conditionType'] = 'ROLLOUT'
        self._splitObjects['rollout1'] = split_parser.parse(raw_split, True)

        # Rollout restricted to 1% of traffic with a fixed seed.
        raw_split['name'] = 'test3'
        raw_split['trafficAllocation'] = 1
        raw_split['trafficAllocationSeed'] = -1
        self._splitObjects['rollout2'] = split_parser.parse(raw_split, True)

    def testTrafficAllocation(self):
        '''
        A whitelist split and a fully-allocated rollout both return the
        matched treatment ('on'); a 1%-allocated rollout returns the
        default treatment for a key outside the allocation.
        '''
        treatment1, label1 = self._client._get_treatment_for_split(
            self._splitObjects['whitelist'], 'testKey', None
        )
        self.assertEqual(treatment1, 'on')

        treatment2, label1 = self._client._get_treatment_for_split(
            self._splitObjects['rollout1'], 'testKey', None
        )
        self.assertEqual(treatment2, 'on')

        treatment3, label1 = self._client._get_treatment_for_split(
            self._splitObjects['rollout2'], 'testKey', None
        )
        self.assertEqual(treatment3, 'default')
16 changes: 13 additions & 3 deletions splitio/tests/test_splitters.py
Original file line number Diff line number Diff line change
Expand Up @@ -166,16 +166,26 @@ def test_with_sample_data(self):
with open(join(dirname(__file__), 'sample-data.jsonl')) as f:
for line in map(loads, f):
seed, key, hash_, bucket = line
self.assertEqual(int(bucket), self.splitter.get_bucket(int(hash_)))

self.assertEqual(
int(bucket),
self.splitter.get_bucket(key, seed, HashAlgorithm.LEGACY)
)

# This test is being skipped because apparently LEGACY hash for
# non-alphanumeric keys isn't working properly.
# TODO: Discuss with @sarrubia whether we should raise ticket for this.
@skip
def test_with_non_alpha_numeric_sample_data(self):
    """
    Tests hash_key against expected values using non alphanumeric values
    """
    sample_path = join(dirname(__file__), 'sample-data-non-alpha-numeric.jsonl')
    with open(sample_path) as sample_file:
        for raw_line in sample_file:
            seed, key, hash_, bucket = loads(raw_line)
            actual = self.splitter.get_bucket(key, seed, HashAlgorithm.LEGACY)
            self.assertEqual(int(bucket), actual)


@skip
Expand Down
Loading