Unrevert s3 config #673

Merged: 5 commits, Oct 13, 2015
112 changes: 101 additions & 11 deletions botocore/client.py
@@ -21,12 +21,15 @@
from botocore.endpoint import EndpointCreator, DEFAULT_TIMEOUT
from botocore.exceptions import ClientError, DataNotFoundError
from botocore.exceptions import OperationNotPageableError
from botocore.exceptions import InvalidS3AddressingStyleError
from botocore.hooks import first_non_none_response
from botocore.model import ServiceModel
from botocore.paginate import Paginator
from botocore.signers import RequestSigner
from botocore.utils import CachedProperty
from botocore.utils import get_service_module_name
from botocore.utils import fix_s3_host
from botocore.utils import switch_to_virtual_host_style
from botocore.docs.docstring import ClientMethodDocstring
from botocore.docs.docstring import PaginatorDocstring

@@ -164,6 +167,30 @@ def _determine_region_name(self, endpoint_config, region_name=None,

return region_name

def _inject_s3_configuration(self, config_kwargs, scoped_config,
client_config):
s3_configuration = None

# Check the scoped config first
if scoped_config is not None:
s3_configuration = scoped_config.get('s3')

# Next, specific client config values take precedence over
# specific values in the scoped config.
if client_config is not None:
if client_config.s3 is not None:
if s3_configuration is None:
s3_configuration = client_config.s3
else:
# The current s3_configuration dictionary may be
# from a source that should only be read from, so
# to be safe we make a copy of it to modify
# before it actually gets updated.
s3_configuration = s3_configuration.copy()
s3_configuration.update(client_config.s3)

config_kwargs['s3'] = s3_configuration

def _get_client_args(self, service_model, region_name, is_secure,
endpoint_url, verify, credentials,
scoped_config, client_config):
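
The precedence rule here is that scoped-config values form the base and client-config values win on conflicts. A minimal sketch of the same merge, using made-up dictionaries in place of real session objects:

```python
# Hypothetical inputs: scoped_s3 stands in for the 's3' section of the
# AWS config file, client_s3 for the s3 dict on a botocore Config object.
scoped_s3 = {'addressing_style': 'path'}
client_s3 = {'addressing_style': 'virtual'}

# Copy the scoped values first, then overlay the client values so that
# anything set explicitly on the client config takes precedence.
merged = scoped_s3.copy()
merged.update(client_s3)
print(merged)  # {'addressing_style': 'virtual'}
```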
@@ -218,6 +245,11 @@ def _get_client_args(self, service_model, region_name, is_secure,
config_kwargs.update(
connect_timeout=client_config.connect_timeout,
read_timeout=client_config.read_timeout)

# Add any additional s3 configuration for client
self._inject_s3_configuration(
config_kwargs, scoped_config, client_config)

new_config = Config(**config_kwargs)

endpoint_creator = EndpointCreator(self._endpoint_resolver,
@@ -307,10 +339,30 @@ def __init__(self, serializer, endpoint, response_parser,
self.meta = ClientMeta(event_emitter, self._client_config,
endpoint.host, service_model,
self._PY_TO_OP_NAME)
self._register_handlers()

def _register_handlers(self):
# Register the handler required to sign requests.
self.meta.events.register('request-created.%s' %
service_model.endpoint_prefix,
self.meta.service_model.endpoint_prefix,
self._sign_request)

# If the virtual host addressing style is being forced,
# swap the default fix_s3_host handler for the more general
# switch_to_virtual_host_style handler, which does not have opt-out
# cases (other than raising an error if the name is DNS incompatible).
if self.meta.config.s3 is None:
s3_addressing_style = None
else:
s3_addressing_style = self.meta.config.s3.get('addressing_style')

if s3_addressing_style == 'path':
self.meta.events.unregister('before-sign.s3', fix_s3_host)
elif s3_addressing_style == 'virtual':
self.meta.events.unregister('before-sign.s3', fix_s3_host)
self.meta.events.register(
'before-sign.s3', switch_to_virtual_host_style)

@property
def _service_model(self):
return self.meta.service_model
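
The branching in _register_handlers amounts to a three-way choice driven by addressing_style. A standalone sketch of that selection, with placeholder functions standing in for the real fix_s3_host and switch_to_virtual_host_style handlers:

```python
def fix_s3_host(request, **kwargs):
    """Placeholder for the default handler, which opts out of virtual
    hosting for sigv4 and for DNS-incompatible bucket names."""

def switch_to_virtual_host_style(request, **kwargs):
    """Placeholder for the forced virtual-host handler."""

def choose_s3_handler(s3_config):
    # Mirrors _register_handlers: None or 'auto' keeps the default handler,
    # 'virtual' forces virtual hosting, 'path' disables URL rewriting.
    style = (s3_config or {}).get('addressing_style')
    if style == 'path':
        return None
    if style == 'virtual':
        return switch_to_virtual_host_style
    return fix_s3_host

assert choose_s3_handler(None) is fix_s3_host
assert choose_s3_handler({'addressing_style': 'path'}) is None
assert choose_s3_handler({'addressing_style': 'virtual'}) is switch_to_virtual_host_style
```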
@@ -541,23 +593,61 @@ def method_to_api_mapping(self):
class Config(object):
"""Advanced configuration for Botocore clients.

This class allows you to configure:

* Region name
* Signature version
* User agent
* User agent extra
* Connect timeout
* Read timeout

:type region_name: str
:param region_name: The region to use in instantiating the client

:type signature_version: str
:param signature_version: The signature version when signing requests.

:type user_agent: str
:param user_agent: The value to use in the User-Agent header.

:type user_agent_extra: str
:param user_agent_extra: The value to append to the current User-Agent
header value.

:type connect_timeout: int
:param connect_timeout: The time in seconds till a timeout exception is
thrown when attempting to make a connection.

:type read_timeout: int
:param read_timeout: The time in seconds till a timeout exception is
thrown when attempting to read from a connection.

:type s3: dict
:param s3: A dictionary of s3 specific configurations.
Valid keys are:
* 'addressing_style' -- Refers to the style in which to address
s3 endpoints. Values must be a string that equals:
* auto -- Addressing style is chosen for the user. Depending
on the configuration of the client, the endpoint
may be addressed in the virtual or the path
style. Note that this is the default behavior if
no style is specified.

[inline review comment]
Member: It would be worth mentioning this is the default value.
Contributor Author: Yep

* virtual -- Addressing style is always virtual. The name of
the bucket must be DNS compatible or an
exception will be thrown. Endpoints will be
addressed as such: mybucket.s3.amazonaws.com
* path -- Addressing style is always by path. Endpoints will
be addressed as such: s3.amazonaws.com/mybucket
"""
def __init__(self, region_name=None, signature_version=None,
user_agent=None, user_agent_extra=None,
connect_timeout=DEFAULT_TIMEOUT,
read_timeout=DEFAULT_TIMEOUT):
read_timeout=DEFAULT_TIMEOUT,
s3=None):
self.region_name = region_name
self.signature_version = signature_version
self.user_agent = user_agent
self.user_agent_extra = user_agent_extra
self.connect_timeout = connect_timeout
self.read_timeout = read_timeout
self._validate_s3_configuration(s3)
self.s3 = s3

def _validate_s3_configuration(self, s3):
if s3 is not None:
addressing_style = s3.get('addressing_style')
if addressing_style not in ['virtual', 'auto', 'path', None]:
raise InvalidS3AddressingStyleError(
s3_addressing_style=addressing_style)
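
Put together, the new s3 key is passed through Config at client creation time. A short usage sketch along the lines of the functional tests further down (the bucket name is made up):

```python
import botocore.session
from botocore.client import Config

session = botocore.session.get_session()

# Force virtual hosted-style addressing: a request against 'mybucket' is
# sent to mybucket.s3.amazonaws.com rather than s3.amazonaws.com/mybucket.
s3 = session.create_client(
    's3', config=Config(s3={'addressing_style': 'virtual'}))
```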
19 changes: 19 additions & 0 deletions botocore/exceptions.py
@@ -310,3 +310,22 @@ class UnsupportedTLSVersionWarning(Warning):

class ImminentRemovalWarning(Warning):
pass


class InvalidDNSNameError(BotoCoreError):
"""Error when virtual host path is forced on a non-DNS compatible bucket"""
fmt = (
'Bucket named {bucket_name} is not DNS compatible. Virtual '
'hosted-style addressing cannot be used. The addressing style '
'can be configured by removing the addressing_style value '
'or setting that value to \'path\' or \'auto\' in the AWS Config '
'file or in the botocore.client.Config object.'
)


class InvalidS3AddressingStyleError(BotoCoreError):
"""Error when an invalid path style is specified"""
fmt = (
'S3 addressing style {s3_addressing_style} is invalid. Valid options '
'are: \'auto\', \'virtual\', and \'path\''
)
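
Since Config validates the s3 dictionary in its constructor, an unsupported style fails immediately rather than at request time. A small sketch of the expected behavior:

```python
from botocore.client import Config
from botocore.exceptions import InvalidS3AddressingStyleError

try:
    # 'host' is not one of 'auto', 'virtual', or 'path', so constructing
    # the Config object raises InvalidS3AddressingStyleError.
    Config(s3={'addressing_style': 'host'})
except InvalidS3AddressingStyleError as e:
    print(e)
```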
53 changes: 46 additions & 7 deletions botocore/utils.py
@@ -22,6 +22,7 @@
from dateutil.tz import tzlocal, tzutc

from botocore.exceptions import InvalidExpressionError, ConfigNotFound
from botocore.exceptions import InvalidDNSNameError
from botocore.compat import json, quote, zip_longest, urlsplit, urlunsplit
from botocore.vendored import requests
from botocore.compat import OrderedDict
@@ -630,6 +631,34 @@ def fix_s3_host(request, signature_version, region_name, **kwargs):
addressing. This allows us to avoid 301 redirects for all
bucket names that can be CNAME'd.
"""
# By default we do not use virtual hosted style addressing when
# signed with signature version 4.
if signature_version in ['s3v4', 'v4']:
return
elif not _allowed_region(region_name):
return
try:
switch_to_virtual_host_style(
request, signature_version, 's3.amazonaws.com')
except InvalidDNSNameError as e:
bucket_name = e.kwargs['bucket_name']
logger.debug('Not changing URI, bucket is not DNS compatible: %s',
bucket_name)


def switch_to_virtual_host_style(request, signature_version,
default_endpoint_url=None, **kwargs):
"""
This is a handler to force virtual host style s3 addressing no matter
the signature version (which is taken into consideration for the default
case). If the bucket is not DNS compatible an InvalidDNSNameError is raised.

:param request: An AWSRequest object that is about to be sent.
:param signature_version: The signature version to sign with
:param default_endpoint_url: The endpoint to use when switching to a
virtual style. If None is supplied, the virtual host will be
constructed from the url of the request.
"""
if request.auth_path is not None:
# The auth_path has already been applied (this may be a
# retried request). We don't need to perform this
@@ -645,29 +674,39 @@ def fix_s3_host(request, signature_version, region_name, **kwargs):
parts = urlsplit(request.url)
request.auth_path = parts.path
path_parts = parts.path.split('/')
if signature_version in ['s3v4', 'v4']:
return

# Retrieve the endpoint that we will be prepending the bucket name to.
if default_endpoint_url is None:
default_endpoint_url = parts.netloc

if len(path_parts) > 1:
bucket_name = path_parts[1]
if not bucket_name:
# If the bucket name is empty we should not be checking for
# dns compatibility.
return
logger.debug('Checking for DNS compatible bucket for: %s',
request.url)
if check_dns_name(bucket_name) and _allowed_region(region_name):
if check_dns_name(bucket_name):
# If the operation is on a bucket, the auth_path must be
# terminated with a '/' character.
if len(path_parts) == 2:
if request.auth_path[-1] != '/':
request.auth_path += '/'
path_parts.remove(bucket_name)
global_endpoint = 's3.amazonaws.com'
# At the very least the path must be a '/', such as with the
# CreateBucket operation when DNS style is being used. If this
# is not done you will get an empty path which is incorrect.
path = '/'.join(path_parts) or '/'
global_endpoint = default_endpoint_url
host = bucket_name + '.' + global_endpoint
new_tuple = (parts.scheme, host, '/'.join(path_parts),
new_tuple = (parts.scheme, host, path,
parts.query, '')
new_uri = urlunsplit(new_tuple)
request.url = new_uri
logger.debug('URI updated to: %s', new_uri)
else:
logger.debug('Not changing URI, bucket is not DNS compatible: %s',
bucket_name)
raise InvalidDNSNameError(bucket_name=bucket_name)


def _is_get_bucket_location_request(request):
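The net effect of switch_to_virtual_host_style is a host and path rewrite. A self-contained sketch of that transformation, using only the standard library (Python 3 urllib.parse rather than botocore.compat) and ignoring the auth_path and signature-version handling above:

```python
from urllib.parse import urlsplit, urlunsplit

def to_virtual_host_style(url):
    # Move the leading path component (the bucket name) into the host,
    # keeping at least '/' as the remaining path.
    parts = urlsplit(url)
    path_parts = parts.path.split('/')
    bucket_name = path_parts[1]
    path_parts.remove(bucket_name)
    path = '/'.join(path_parts) or '/'
    host = bucket_name + '.' + parts.netloc
    return urlunsplit((parts.scheme, host, path, parts.query, ''))

print(to_virtual_host_style('https://s3.amazonaws.com/mybucket/mykey'))
# https://mybucket.s3.amazonaws.com/mykey
print(to_virtual_host_style('https://s3.amazonaws.com/mybucket'))
# https://mybucket.s3.amazonaws.com/
```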
73 changes: 73 additions & 0 deletions tests/functional/test_s3.py
@@ -13,6 +13,7 @@
from tests import unittest, mock, BaseSessionTest

import botocore.session
from botocore.client import Config
from botocore.exceptions import ParamValidationError


@@ -133,3 +134,75 @@ def test_500_error_with_non_xml_body(self):
# The first response should have been retried even though the xml is
# invalid, eventually returning the 200 response.
self.assertEqual(response['ResponseMetadata']['HTTPStatusCode'], 200)


class BaseS3AddressingStyle(BaseSessionTest):
def setUp(self):
super(BaseS3AddressingStyle, self).setUp()
self.http_response = mock.Mock()
self.http_response.status_code = 200
self.http_response.headers = {}
self.http_response.content = b''


class TestVirtualHostStyle(BaseS3AddressingStyle):
def test_default_endpoint_for_virtual_addressing(self):
s3 = self.session.create_client(
's3', config=Config(s3={'addressing_style': 'virtual'}))
with mock.patch('botocore.endpoint.Session.send') \
as mock_send:
mock_send.return_value = self.http_response
s3.put_object(Bucket='mybucket', Key='mykey', Body='mybody')
request_sent = mock_send.call_args[0][0]
self.assertEqual(
'https://mybucket.s3.amazonaws.com/mykey', request_sent.url)

def test_provided_endpoint_url_for_virtual_addressing(self):
s3 = self.session.create_client(
's3', config=Config(s3={'addressing_style': 'virtual'}),
endpoint_url='https://foo.amazonaws.com')
with mock.patch('botocore.endpoint.Session.send') \
as mock_send:
mock_send.return_value = self.http_response
s3.put_object(Bucket='mybucket', Key='mykey', Body='mybody')
request_sent = mock_send.call_args[0][0]
self.assertEqual(
'https://mybucket.foo.amazonaws.com/mykey', request_sent.url)

def test_us_gov_with_virtual_addressing(self):
s3 = self.session.create_client(
's3', region_name='us-gov-west-1',
config=Config(s3={'addressing_style': 'virtual'}))
with mock.patch('botocore.endpoint.Session.send') \
as mock_send:
mock_send.return_value = self.http_response
s3.put_object(Bucket='mybucket', Key='mykey', Body='mybody')
request_sent = mock_send.call_args[0][0]
self.assertEqual(
'https://mybucket.s3-us-gov-west-1.amazonaws.com/mykey',
request_sent.url)


class TestPathHostStyle(BaseS3AddressingStyle):
def test_default_endpoint_for_path_addressing(self):
s3 = self.session.create_client(
's3', config=Config(s3={'addressing_style': 'path'}))
with mock.patch('botocore.endpoint.Session.send') \
as mock_send:
mock_send.return_value = self.http_response
s3.put_object(Bucket='mybucket', Key='mykey', Body='mybody')
request_sent = mock_send.call_args[0][0]
self.assertEqual(
'https://s3.amazonaws.com/mybucket/mykey', request_sent.url)

def test_provided_endpoint_url_for_path_addressing(self):
s3 = self.session.create_client(
's3', config=Config(s3={'addressing_style': 'path'}),
endpoint_url='https://foo.amazonaws.com')
with mock.patch('botocore.endpoint.Session.send') \
as mock_send:
mock_send.return_value = self.http_response
s3.put_object(Bucket='mybucket', Key='mykey', Body='mybody')
request_sent = mock_send.call_args[0][0]
self.assertEqual(
'https://foo.amazonaws.com/mybucket/mykey', request_sent.url)