Merge "ratelimit: Allow multiple placements"
Zuul authored and openstack-gerrit committed Jun 1, 2020
2 parents ede9dad + 1db11df commit 6c1bc39
Showing 3 changed files with 73 additions and 50 deletions.
5 changes: 4 additions & 1 deletion etc/proxy-server.conf-sample
@@ -470,7 +470,10 @@ use = egg:swift#s3api
# With either tempauth or your custom auth:
# - Put s3api just before your auth filter(s) in the pipeline
# With keystone:
# - Put s3api and s3token before keystoneauth in the pipeline
# - Put s3api and s3token before keystoneauth in the pipeline, but after
# auth_token
# If you have ratelimit enabled for Swift requests, you may want to place a
# second copy after auth to also ratelimit S3 requests.
#
# Swift has no concept of the S3's resource owner; the resources
# (i.e. containers and objects) created via the Swift API have no owner
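For orientation (not part of this commit), here is a sketch of what such a pipeline could look like, heavily abbreviated, with the second section name "ratelimit2" invented purely for illustration:

# Hypothetical, abbreviated pipeline -- real deployments carry many more filters.
[pipeline:main]
pipeline = catch_errors cache ratelimit authtoken s3api s3token keystoneauth ratelimit2 proxy-logging proxy-server

[filter:ratelimit]
use = egg:swift#ratelimit
account_ratelimit = 1

# Second placement of the same middleware, after auth, so that S3 requests
# (which only gain a Swift account once s3api/s3token have mapped their
# credentials) get rate-limited as well.
[filter:ratelimit2]
use = egg:swift#ratelimit
account_ratelimit = 1

With this layout the first copy limits native Swift requests and the second copy limits S3 requests; the swift.ratelimit.handled flag introduced below keeps a native request from being counted by both copies.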
4 changes: 4 additions & 0 deletions swift/common/middleware/ratelimit.py
@@ -242,6 +242,10 @@ def handle_ratelimit(self, req, account_name, container_name, obj_name):
if not self.memcache_client:
return None

if req.environ.get('swift.ratelimit.handled'):
return None
req.environ['swift.ratelimit.handled'] = True

try:
account_info = get_account_info(req.environ, self.app,
swift_source='RL')
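The three added lines are what make a second placement safe: whichever ratelimit instance handles a request first records that fact in the WSGI environ, and any later instance backs off. A stripped-down sketch of the pattern, with invented names and none of the real middleware's memcache logic:

# Illustrative only -- not Swift's actual class.
MARKER = 'swift.ratelimit.handled'

class HandleOnceMiddleware(object):
    def __init__(self, app):
        self.app = app

    def __call__(self, environ, start_response):
        if environ.get(MARKER):
            # An earlier copy in the pipeline already rate-limited this
            # request, so this instance simply passes it through.
            return self.app(environ, start_response)
        environ[MARKER] = True
        # A real instance would consult memcache here and possibly return
        # a 498 Rate Limited response instead of calling the app.
        return self.app(environ, start_response)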
114 changes: 65 additions & 49 deletions test/unit/common/middleware/test_ratelimit.py
@@ -72,12 +72,20 @@ def delete(self, key):


class FakeApp(object):
skip_handled_check = False

def __call__(self, env, start_response):
assert self.skip_handled_check or env.get('swift.ratelimit.handled')
start_response('200 OK', [])
return [b'Some Content']


class FakeReq(object):
def __init__(self, method, env=None):
self.method = method
self.environ = env or {}


def start_response(*args):
pass

@@ -160,36 +168,29 @@ def test_get_ratelimitable_key_tuples(self):
{'object_count': '5'}
the_app = ratelimit.filter_factory(conf_dict)(FakeApp())
the_app.memcache_client = fake_memcache
req = lambda: None
req.environ = {'swift.cache': fake_memcache, 'PATH_INFO': '/v1/a/c/o'}
environ = {'swift.cache': fake_memcache, 'PATH_INFO': '/v1/a/c/o'}
with mock.patch('swift.common.middleware.ratelimit.get_account_info',
lambda *args, **kwargs: {}):
req.method = 'DELETE'
self.assertEqual(len(the_app.get_ratelimitable_key_tuples(
req, 'a', None, None)), 0)
req.method = 'PUT'
FakeReq('DELETE', environ), 'a', None, None)), 0)
self.assertEqual(len(the_app.get_ratelimitable_key_tuples(
req, 'a', 'c', None)), 1)
req.method = 'DELETE'
FakeReq('PUT', environ), 'a', 'c', None)), 1)
self.assertEqual(len(the_app.get_ratelimitable_key_tuples(
req, 'a', 'c', None)), 1)
req.method = 'GET'
FakeReq('DELETE', environ), 'a', 'c', None)), 1)
self.assertEqual(len(the_app.get_ratelimitable_key_tuples(
req, 'a', 'c', 'o')), 0)
req.method = 'PUT'
FakeReq('GET', environ), 'a', 'c', 'o')), 0)
self.assertEqual(len(the_app.get_ratelimitable_key_tuples(
req, 'a', 'c', 'o')), 1)
FakeReq('PUT', environ), 'a', 'c', 'o')), 1)

req.method = 'PUT'
self.assertEqual(len(the_app.get_ratelimitable_key_tuples(
req, 'a', 'c', None, global_ratelimit=10)), 2)
FakeReq('PUT', environ), 'a', 'c', None, global_ratelimit=10)), 2)
self.assertEqual(the_app.get_ratelimitable_key_tuples(
req, 'a', 'c', None, global_ratelimit=10)[1],
FakeReq('PUT', environ), 'a', 'c', None, global_ratelimit=10)[1],
('ratelimit/global-write/a', 10))

req.method = 'PUT'
self.assertEqual(len(the_app.get_ratelimitable_key_tuples(
req, 'a', 'c', None, global_ratelimit='notafloat')), 1)
FakeReq('PUT', environ), 'a', 'c', None,
global_ratelimit='notafloat')), 1)

def test_memcached_container_info_dict(self):
mdict = headers_to_container_info({'x-container-object-count': '45'})
@@ -204,9 +205,8 @@ def test_ratelimit_old_memcache_format(self):
{'container_size': 5}
the_app = ratelimit.filter_factory(conf_dict)(FakeApp())
the_app.memcache_client = fake_memcache
req = lambda: None
req.method = 'PUT'
req.environ = {'PATH_INFO': '/v1/a/c/o', 'swift.cache': fake_memcache}
req = FakeReq('PUT', {
'PATH_INFO': '/v1/a/c/o', 'swift.cache': fake_memcache})
with mock.patch('swift.common.middleware.ratelimit.get_account_info',
lambda *args, **kwargs: {}):
tuples = the_app.get_ratelimitable_key_tuples(req, 'a', 'c', 'o')
@@ -227,8 +227,8 @@ def test_account_ratelimit(self):
req = Request.blank('/v1/a%s/c' % meth)
req.method = meth
req.environ['swift.cache'] = FakeMemcache()
make_app_call = lambda: self.test_ratelimit(req.environ,
start_response)
make_app_call = lambda: self.test_ratelimit(
req.environ.copy(), start_response)
begin = time.time()
self._run(make_app_call, num_calls, current_rate,
check_time=bool(exp_time))
@@ -244,7 +244,7 @@ def test_ratelimit_set_incr(self):
req.method = 'PUT'
req.environ['swift.cache'] = FakeMemcache()
req.environ['swift.cache'].init_incr_return_neg = True
make_app_call = lambda: self.test_ratelimit(req.environ,
make_app_call = lambda: self.test_ratelimit(req.environ.copy(),
start_response)
begin = time.time()
with mock.patch('swift.common.middleware.ratelimit.get_account_info',
@@ -260,15 +260,15 @@ def test_ratelimit_old_white_black_list(self):
'account_whitelist': 'a',
'account_blacklist': 'b'}
self.test_ratelimit = ratelimit.filter_factory(conf_dict)(FakeApp())
req = Request.blank('/')
with mock.patch.object(self.test_ratelimit,
'memcache_client', FakeMemcache()):
self.assertEqual(
self.test_ratelimit.handle_ratelimit(req, 'a', 'c', 'o'),
self.test_ratelimit.handle_ratelimit(
Request.blank('/'), 'a', 'c', 'o'),
None)
self.assertEqual(
self.test_ratelimit.handle_ratelimit(
req, 'b', 'c', 'o').status_int,
Request.blank('/'), 'b', 'c', 'o').status_int,
497)

def test_ratelimit_whitelist_sysmeta(self):
@@ -331,7 +331,7 @@ def __init__(self, parent):
self.parent = parent

def run(self):
self.result = self.parent.test_ratelimit(req.environ,
self.result = self.parent.test_ratelimit(req.environ.copy(),
start_response)

def get_fake_ratelimit(*args, **kwargs):
@@ -370,18 +370,17 @@ def test_ratelimit_max_rate_double(self):
# simulates 4 requests coming in at same time, then sleeping
with mock.patch('swift.common.middleware.ratelimit.get_account_info',
lambda *args, **kwargs: {}):
r = self.test_ratelimit(req.environ, start_response)
r = self.test_ratelimit(req.environ.copy(), start_response)
mock_sleep(.1)
r = self.test_ratelimit(req.environ, start_response)
r = self.test_ratelimit(req.environ.copy(), start_response)
mock_sleep(.1)
r = self.test_ratelimit(req.environ, start_response)
r = self.test_ratelimit(req.environ.copy(), start_response)
self.assertEqual(r[0], b'Slow down')
mock_sleep(.1)
r = self.test_ratelimit(req.environ, start_response)
r = self.test_ratelimit(req.environ.copy(), start_response)
self.assertEqual(r[0], b'Slow down')
mock_sleep(.1)
r = self.test_ratelimit(req.environ, start_response)
print(repr(r))
r = self.test_ratelimit(req.environ.copy(), start_response)
self.assertEqual(r[0], b'Some Content')

def test_ratelimit_max_rate_double_container(self):
@@ -404,17 +403,17 @@ def test_ratelimit_max_rate_double_container(self):
# simulates 4 requests coming in at same time, then sleeping
with mock.patch('swift.common.middleware.ratelimit.get_account_info',
lambda *args, **kwargs: {}):
r = self.test_ratelimit(req.environ, start_response)
r = self.test_ratelimit(req.environ.copy(), start_response)
mock_sleep(.1)
r = self.test_ratelimit(req.environ, start_response)
r = self.test_ratelimit(req.environ.copy(), start_response)
mock_sleep(.1)
r = self.test_ratelimit(req.environ, start_response)
r = self.test_ratelimit(req.environ.copy(), start_response)
self.assertEqual(r[0], b'Slow down')
mock_sleep(.1)
r = self.test_ratelimit(req.environ, start_response)
r = self.test_ratelimit(req.environ.copy(), start_response)
self.assertEqual(r[0], b'Slow down')
mock_sleep(.1)
r = self.test_ratelimit(req.environ, start_response)
r = self.test_ratelimit(req.environ.copy(), start_response)
self.assertEqual(r[0], b'Some Content')

def test_ratelimit_max_rate_double_container_listing(self):
@@ -437,17 +436,17 @@ def test_ratelimit_max_rate_double_container_listing(self):
lambda *args, **kwargs: {}):
time_override = [0, 0, 0, 0, None]
# simulates 4 requests coming in at same time, then sleeping
r = self.test_ratelimit(req.environ, start_response)
r = self.test_ratelimit(req.environ.copy(), start_response)
mock_sleep(.1)
r = self.test_ratelimit(req.environ, start_response)
r = self.test_ratelimit(req.environ.copy(), start_response)
mock_sleep(.1)
r = self.test_ratelimit(req.environ, start_response)
r = self.test_ratelimit(req.environ.copy(), start_response)
self.assertEqual(r[0], b'Slow down')
mock_sleep(.1)
r = self.test_ratelimit(req.environ, start_response)
r = self.test_ratelimit(req.environ.copy(), start_response)
self.assertEqual(r[0], b'Slow down')
mock_sleep(.1)
r = self.test_ratelimit(req.environ, start_response)
r = self.test_ratelimit(req.environ.copy(), start_response)
self.assertEqual(r[0], b'Some Content')
mc = self.test_ratelimit.memcache_client
try:
@@ -466,9 +465,6 @@ def test_ratelimit_max_rate_multiple_acc(self):

the_app = ratelimit.filter_factory(conf_dict)(FakeApp())
the_app.memcache_client = fake_memcache
req = lambda: None
req.method = 'PUT'
req.environ = {}

class rate_caller(threading.Thread):

@@ -478,8 +474,8 @@ def __init__(self, name):

def run(self):
for j in range(num_calls):
self.result = the_app.handle_ratelimit(req, self.myname,
'c', None)
self.result = the_app.handle_ratelimit(
FakeReq('PUT'), self.myname, 'c', None)

with mock.patch('swift.common.middleware.ratelimit.get_account_info',
lambda *args, **kwargs: {}):
@@ -541,7 +537,9 @@ def test_no_memcache(self):
current_rate = 13
num_calls = 5
conf_dict = {'account_ratelimit': current_rate}
self.test_ratelimit = ratelimit.filter_factory(conf_dict)(FakeApp())
fake_app = FakeApp()
fake_app.skip_handled_check = True
self.test_ratelimit = ratelimit.filter_factory(conf_dict)(fake_app)
req = Request.blank('/v1/a')
req.environ['swift.cache'] = None
make_app_call = lambda: self.test_ratelimit(req.environ,
Expand All @@ -551,6 +549,24 @@ def test_no_memcache(self):
time_took = time.time() - begin
self.assertEqual(round(time_took, 1), 0) # no memcache, no limiting

def test_already_handled(self):
current_rate = 13
num_calls = 5
conf_dict = {'container_listing_ratelimit_0': current_rate}
self.test_ratelimit = ratelimit.filter_factory(conf_dict)(FakeApp())
fake_cache = FakeMemcache()
fake_cache.set(
get_cache_key('a', 'c'),
{'object_count': 1})
req = Request.blank('/v1/a/c', environ={'swift.cache': fake_cache})
req.environ['swift.ratelimit.handled'] = True
make_app_call = lambda: self.test_ratelimit(req.environ,
start_response)
begin = time.time()
self._run(make_app_call, num_calls, current_rate, check_time=False)
time_took = time.time() - begin
self.assertEqual(round(time_took, 1), 0) # no memcache, no limiting

def test_restarting_memcache(self):
current_rate = 2
num_calls = 5
