From d129797294d49e47c59aa94a65c985f0b315ee6e Mon Sep 17 00:00:00 2001 From: William Huber Date: Thu, 12 Dec 2019 15:34:37 -0600 Subject: [PATCH] Release 2.6.0 --- CHANGELOG.md | 8 + error] | 53 ++ ibm_botocore/__init__.py | 2 +- ibm_botocore/args.py | 109 ++-- ibm_botocore/auth.py | 13 - ibm_botocore/client.py | 107 +--- ibm_botocore/configprovider.py | 177 ++++-- ibm_botocore/credentials.py | 436 +++++++++++--- .../data/s3/2006-03-01/service-2.json | 529 ++++++++++++++++- ibm_botocore/endpoint.py | 2 +- ibm_botocore/exceptions.py | 24 + ibm_botocore/handlers.py | 35 +- ibm_botocore/httpsession.py | 3 +- ibm_botocore/session.py | 47 +- ibm_botocore/signers.py | 5 +- ibm_botocore/utils.py | 372 +++++++++++- tests/__init__.py | 121 +++- tests/functional/test_waiter_config.py | 2 +- tests/integration/test_client.py | 57 -- tests/integration/test_client_http.py | 5 +- tests/integration/test_ec2.py | 69 +-- tests/integration/test_elastictranscoder.py | 2 + tests/integration/test_s3.py | 203 +++++-- tests/integration/test_smoke.py | 30 +- tests/unit/auth/test_signers.py | 14 - tests/unit/test_args.py | 112 ++-- tests/unit/test_client.py | 172 +----- tests/unit/test_config_provider.py | 290 +++++++-- tests/unit/test_credentials.py | 210 ++++++- tests/unit/test_endpoint.py | 11 +- tests/unit/test_http_session.py | 18 +- tests/unit/test_session.py | 8 +- tests/unit/test_signers.py | 132 ++--- tests/unit/test_utils.py | 552 +++++++++++++++--- 34 files changed, 2957 insertions(+), 973 deletions(-) create mode 100644 error] mode change 100644 => 100755 tests/unit/test_signers.py diff --git a/CHANGELOG.md b/CHANGELOG.md index fabc465..8ade8b7 100755 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # CHANGELOG +# 2.6.0 +## Content +### Features +* Support for On-Premise IBM Cloud Object Storage (3.14.8+) +### Defect Fixes +* IBM Python SDK aligned with AWS Python SDK - Boto3(v1.10.36), Botocore(v1.13.36) and S3transfer(v0.2.1) +* Update psutil to use more recent package 
version (psutil>=5.6.6,<5.6.8) + # 2.5.5 ## Content ### Defect Fixes diff --git a/error] b/error] new file mode 100644 index 0000000..8d71d37 --- /dev/null +++ b/error] @@ -0,0 +1,53 @@ +nose.plugins.cover: ERROR: Coverage not available: unable to import coverage module +Traceback (most recent call last): + File "scripts/ci/run-tests", line 19, in + run('nosetests --with-coverage --cover-erase --cover-package ibm_botocore ' + File "scripts/ci/run-tests", line 16, in run + return check_call(command, shell=True) + File "/root/.pyenv/versions/2.7.8/lib/python2.7/subprocess.py", line 535, in check_call + retcode = call(*popenargs, **kwargs) + File "/root/.pyenv/versions/2.7.8/lib/python2.7/subprocess.py", line 522, in call + return Popen(*popenargs, **kwargs).wait() + File "/root/.pyenv/versions/2.7.8/lib/python2.7/subprocess.py", line 1376, in wait + pid, sts = _eintr_retry_call(os.waitpid, self.pid, 0) + File "/root/.pyenv/versions/2.7.8/lib/python2.7/subprocess.py", line 476, in _eintr_retry_call + return func(*args) +KeyboardInterrupt +Traceback (most recent call last): + File "/root/.pyenv/versions/2.7.8/bin/nosetests", line 11, in + sys.exit(run_exit()) + File "/root/.pyenv/versions/2.7.8/lib/python2.7/site-packages/nose/core.py", line 121, in __init__ + **extra_args) + File "/root/.pyenv/versions/2.7.8/lib/python2.7/unittest/main.py", line 94, in __init__ + self.parseArgs(argv) + File "/root/.pyenv/versions/2.7.8/lib/python2.7/site-packages/nose/core.py", line 179, in parseArgs + self.createTests() + File "/root/.pyenv/versions/2.7.8/lib/python2.7/site-packages/nose/core.py", line 193, in createTests + self.test = self.testLoader.loadTestsFromNames(self.testNames) + File "/root/.pyenv/versions/2.7.8/lib/python2.7/site-packages/nose/loader.py", line 481, in loadTestsFromNames + return unittest.TestLoader.loadTestsFromNames(self, names, module) + File "/root/.pyenv/versions/2.7.8/lib/python2.7/unittest/loader.py", line 130, in loadTestsFromNames + suites = 
[self.loadTestsFromName(name, module) for name in names] + File "/root/.pyenv/versions/2.7.8/lib/python2.7/site-packages/nose/loader.py", line 433, in loadTestsFromName + discovered=discovered) + File "/root/.pyenv/versions/2.7.8/lib/python2.7/site-packages/nose/loader.py", line 354, in loadTestsFromModule + tests.extend(self.loadTestsFromDir(module_path)) + File "/root/.pyenv/versions/2.7.8/lib/python2.7/site-packages/nose/loader.py", line 183, in loadTestsFromDir + entry_path, discovered=True) + File "/root/.pyenv/versions/2.7.8/lib/python2.7/site-packages/nose/loader.py", line 418, in loadTestsFromName + addr.filename, addr.module) + File "/root/.pyenv/versions/2.7.8/lib/python2.7/site-packages/nose/importer.py", line 47, in importFromPath + return self.importFromDir(dir_path, fqname) + File "/root/.pyenv/versions/2.7.8/lib/python2.7/site-packages/nose/importer.py", line 94, in importFromDir + mod = load_module(part_fqname, fh, filename, desc) + File "/git/cleversafe/sdk/python/ibm-cos-sdk-python-core/tests/functional/test_waiter_config.py", line 14, in + from jsonschema import Draft4Validator + File "/root/.pyenv/versions/2.7.8/lib/python2.7/site-packages/jsonschema/__init__.py", line 12, in + from jsonschema.exceptions import ( + File "/root/.pyenv/versions/2.7.8/lib/python2.7/site-packages/jsonschema/exceptions.py", line 6, in + from jsonschema import _utils + File "/root/.pyenv/versions/2.7.8/lib/python2.7/site-packages/jsonschema/_utils.py", line 6, in + from jsonschema.compat import str_types, MutableMapping, urlsplit + File "/root/.pyenv/versions/2.7.8/lib/python2.7/site-packages/jsonschema/compat.py", line 1, in + import operator +KeyboardInterrupt diff --git a/ibm_botocore/__init__.py b/ibm_botocore/__init__.py index ad18fb1..5e4d168 100755 --- a/ibm_botocore/__init__.py +++ b/ibm_botocore/__init__.py @@ -17,7 +17,7 @@ import logging __author__ = 'IBM' -__version__ = '2.5.5' +__version__ = '2.6.0' class NullHandler(logging.Handler): diff --git 
a/ibm_botocore/args.py b/ibm_botocore/args.py index 36abdbd..7958f3a 100755 --- a/ibm_botocore/args.py +++ b/ibm_botocore/args.py @@ -20,6 +20,7 @@ import logging import socket +import ibm_botocore.exceptions import ibm_botocore.serialize import ibm_botocore.utils from ibm_botocore.signers import RequestSigner @@ -30,14 +31,39 @@ logger = logging.getLogger(__name__) +VALID_STS_REGIONAL_ENDPOINTS_CONFIG = [ + 'legacy', + 'regional', +] +LEGACY_GLOBAL_STS_REGIONS = [ + 'ap-northeast-1', + 'ap-south-1', + 'ap-southeast-1', + 'ap-southeast-2', + 'aws-global', + 'ca-central-1', + 'eu-central-1', + 'eu-north-1', + 'eu-west-1', + 'eu-west-2', + 'eu-west-3', + 'sa-east-1', + 'us-east-1', + 'us-east-2', + 'us-west-1', + 'us-west-2', +] + + class ClientArgsCreator(object): def __init__(self, event_emitter, user_agent, response_parser_factory, - loader, exceptions_factory): + loader, exceptions_factory, config_store): self._event_emitter = event_emitter self._user_agent = user_agent self._response_parser_factory = response_parser_factory self._loader = loader self._exceptions_factory = exceptions_factory + self._config_store = config_store def get_client_args(self, service_model, region_name, is_secure, endpoint_url, verify, credentials, scoped_config, @@ -113,9 +139,6 @@ def compute_client_args(self, service_model, client_config, if raw_value is not None: parameter_validation = ibm_botocore.utils.ensure_boolean(raw_value) - endpoint_config = endpoint_bridge.resolve( - service_name, region_name, endpoint_url, is_secure) - # Override the user agent if specified in the client config. 
user_agent = self._user_agent if client_config is not None: @@ -124,6 +147,13 @@ def compute_client_args(self, service_model, client_config, if client_config.user_agent_extra is not None: user_agent += ' %s' % client_config.user_agent_extra + endpoint_config = self._compute_endpoint_config( + service_name=service_name, + region_name=region_name, + endpoint_url=endpoint_url, + is_secure=is_secure, + endpoint_bridge=endpoint_bridge, + ) # Create a new client config to be passed to the client based # on the final values. We do not want the user to be able # to try to modify an existing client with a client config. @@ -141,8 +171,7 @@ def compute_client_args(self, service_model, client_config, client_cert=client_config.client_cert, inject_host_prefix=client_config.inject_host_prefix, ) - s3_config = self.compute_s3_config(scoped_config, - client_config) + s3_config = self.compute_s3_config(client_config) return { 'service_name': service_name, 'parameter_validation': parameter_validation, @@ -154,29 +183,8 @@ def compute_client_args(self, service_model, client_config, 'socket_options': self._compute_socket_options(scoped_config) } - def compute_s3_config(self, scoped_config, client_config): - s3_configuration = None - - # Check the scoped config first. - if scoped_config is not None: - s3_configuration = scoped_config.get('s3') - # Until we have proper validation of the config file (including - # nested types), we have to account for the fact that the s3 - # key could be parsed as a string, e.g 's3 = foo'. - # In the case we'll ignore the key for now. - if not isinstance(s3_configuration, dict): - logger.debug("The s3 config key is not a dictionary type, " - "ignoring its value of: %s", s3_configuration) - s3_configuration = None - - # Convert logic for several s3 keys in the scoped config - # so that the various strings map to the appropriate boolean value. 
- if s3_configuration: - boolean_keys = ['use_accelerate_endpoint', - 'use_dualstack_endpoint', - 'payload_signing_enabled'] - s3_configuration = self._convert_config_to_bool( - s3_configuration, boolean_keys) + def compute_s3_config(self, client_config): + s3_configuration = self._config_store.get_config_variable('s3') # Next specific client config values takes precedence over # specific values in the scoped config. @@ -194,14 +202,41 @@ def compute_s3_config(self, scoped_config, client_config): return s3_configuration - def _convert_config_to_bool(self, config_dict, keys): - # Make sure any further modifications to this section of the config - # will not affect the scoped config by making a copy of it. - config_copy = config_dict.copy() - present_keys = [k for k in keys if k in config_copy] - for key in present_keys: - config_copy[key] = ibm_botocore.utils.ensure_boolean(config_copy[key]) - return config_copy + def _compute_endpoint_config(self, service_name, region_name, endpoint_url, + is_secure, endpoint_bridge): + endpoint_config = endpoint_bridge.resolve( + service_name, region_name, endpoint_url, is_secure) + if self._should_set_global_sts_endpoint( + service_name, region_name, endpoint_url): + self._set_global_sts_endpoint(endpoint_config, is_secure) + return endpoint_config + + def _should_set_global_sts_endpoint(self, service_name, region_name, + endpoint_url): + if service_name != 'sts': + return False + if endpoint_url: + return False + return ( + self._get_sts_regional_endpoints_config() == 'legacy' and + region_name in LEGACY_GLOBAL_STS_REGIONS + ) + + def _get_sts_regional_endpoints_config(self): + sts_regional_endpoints_config = self._config_store.get_config_variable( + 'sts_regional_endpoints') + if not sts_regional_endpoints_config: + sts_regional_endpoints_config = 'legacy' + if sts_regional_endpoints_config not in \ + VALID_STS_REGIONAL_ENDPOINTS_CONFIG: + raise ibm_botocore.exceptions.InvalidSTSRegionalEndpointsConfigError( + 
sts_regional_endpoints_config=sts_regional_endpoints_config) + return sts_regional_endpoints_config + + def _set_global_sts_endpoint(self, endpoint_config, is_secure): + scheme = 'https' if is_secure else 'http' + endpoint_config['endpoint_url'] = '%s://sts.amazonaws.com' % scheme + endpoint_config['signing_region'] = 'us-east-1' def _get_default_s3_region(self, service_name, endpoint_bridge): # If a user is providing a custom URL, the endpoint resolver will diff --git a/ibm_botocore/auth.py b/ibm_botocore/auth.py index 641ec04..385a902 100644 --- a/ibm_botocore/auth.py +++ b/ibm_botocore/auth.py @@ -411,19 +411,6 @@ def _set_necessary_date_headers(self, request): class S3SigV4Auth(SigV4Auth): - def __init__(self, credentials, service_name, region_name): - super(S3SigV4Auth, self).__init__( - credentials, service_name, region_name) - self._default_region_name = region_name - - def add_auth(self, request): - # If we ever decide to share auth sessions, this could potentially be - # a source of concurrency bugs. 
- signing_context = request.context.get('signing', {}) - self._region_name = signing_context.get( - 'region', self._default_region_name) - super(S3SigV4Auth, self).add_auth(request) - def _modify_request_before_signing(self, request): super(S3SigV4Auth, self)._modify_request_before_signing(request) if 'X-Amz-Content-SHA256' in request.headers: diff --git a/ibm_botocore/client.py b/ibm_botocore/client.py index 61fd9f6..1d197d7 100755 --- a/ibm_botocore/client.py +++ b/ibm_botocore/client.py @@ -26,13 +26,10 @@ from ibm_botocore.paginate import Paginator from ibm_botocore.utils import CachedProperty from ibm_botocore.utils import get_service_module_name -from ibm_botocore.utils import switch_host_s3_accelerate from ibm_botocore.utils import S3RegionRedirector -from ibm_botocore.utils import fix_s3_host -from ibm_botocore.utils import switch_to_virtual_host_style -from ibm_botocore.utils import S3_ACCELERATE_WHITELIST +from ibm_botocore.utils import S3ArnParamHandler +from ibm_botocore.utils import S3EndpointSetter from ibm_botocore.args import ClientArgsCreator -from ibm_botocore.compat import urlsplit from ibm_botocore import UNSIGNED # Keep this imported. There's pre-existing code that uses # "from ibm_botocore.client import Config". @@ -168,104 +165,22 @@ def _register_endpoint_discovery(self, client, endpoint_url, config): events.register('before-parameter-build', block_endpoint_discovery_required_operations) - def _register_s3_events(self, client, endpoint_bridge, endpoint_url, client_config, scoped_config): if client.meta.service_model.service_name != 's3': return S3RegionRedirector(endpoint_bridge, client).register() - self._set_s3_addressing_style( - endpoint_url, client.meta.config.s3, client.meta.events, - client.meta.partition - ) - # Enable accelerate if the configuration is set to to true or the - # endpoint being used matches one of the accelerate endpoints. 
- if self._is_s3_accelerate(endpoint_url, client.meta.config.s3): - # Also make sure that the hostname gets switched to - # s3-accelerate.amazonaws.com - client.meta.events.register_first( - 'before-sign.s3', switch_host_s3_accelerate) - + S3ArnParamHandler().register(client.meta.events) + S3EndpointSetter( + endpoint_resolver=self._endpoint_resolver, + region=client.meta.region_name, + s3_config=client.meta.config.s3, + endpoint_url=endpoint_url, + partition=client.meta.partition + ).register(client.meta.events) self._set_s3_presign_signature_version( client.meta, client_config, scoped_config) - def _set_s3_addressing_style(self, endpoint_url, s3_config, event_emitter, - partition): - if s3_config is None: - s3_config = {} - - addressing_style = self._get_s3_addressing_style( - endpoint_url, s3_config) - handler = self._get_s3_addressing_handler( - endpoint_url, s3_config, addressing_style, partition) - if handler is not None: - event_emitter.register('before-sign.s3', handler) - - def _get_s3_addressing_style(self, endpoint_url, s3_config): - # Use virtual host style addressing if accelerate is enabled or if - # the given endpoint url is an accelerate endpoint. - accelerate = s3_config.get('use_accelerate_endpoint', False) - if accelerate or self._is_s3_accelerate(endpoint_url, s3_config): - return 'virtual' - - # If a particular addressing style is configured, use it. - configured_addressing_style = s3_config.get('addressing_style') - if configured_addressing_style: - return configured_addressing_style - - def _get_s3_addressing_handler(self, endpoint_url, s3_config, - addressing_style, partition): - # If virtual host style was configured, use it regardless of whether - # or not the bucket looks dns compatible. - if addressing_style == 'virtual': - logger.debug("Using S3 virtual host style addressing.") - return switch_to_virtual_host_style - - # If path style is configured, no additional steps are needed. 
If - # endpoint_url was specified, don't default to virtual. We could - # potentially default provided endpoint urls to virtual hosted - # style, but for now it is avoided. - if addressing_style == 'path' or endpoint_url is not None: - logger.debug("Using S3 path style addressing.") - return None - - logger.debug("Defaulting to S3 virtual host style addressing with " - "path style addressing fallback.") - - # By default, try to use virtual style with path fallback. - return fix_s3_host - - def _is_s3_accelerate(self, endpoint_url, s3_config): - # Accelerate has been explicitly configured. - if s3_config is not None and s3_config.get('use_accelerate_endpoint'): - return True - - # Accelerate mode is turned on automatically if an endpoint url is - # provided that matches the accelerate scheme. - if endpoint_url is None: - return False - - # Accelerate is only valid for Amazon endpoints. - netloc = urlsplit(endpoint_url).netloc - if not netloc.endswith('amazonaws.com'): - return False - - # The first part of the url should always be s3-accelerate. - parts = netloc.split('.') - if parts[0] != 's3-accelerate': - return False - - # Url parts between 's3-accelerate' and 'amazonaws.com' which - # represent different url features. - feature_parts = parts[1:-2] - - # There should be no duplicate url parts. - if len(feature_parts) != len(set(feature_parts)): - return False - - # Remaining parts must all be in the whitelist. 
- return all(p in S3_ACCELERATE_WHITELIST for p in feature_parts) - def _set_s3_presign_signature_version(self, client_meta, client_config, scoped_config): # This will return the manually configured signature version, or None @@ -322,7 +237,7 @@ def _get_client_args(self, service_model, region_name, is_secure, args_creator = ClientArgsCreator( self._event_emitter, self._user_agent, self._response_parser_factory, self._loader, - self._exceptions_factory) + self._exceptions_factory, config_store=self._config_store) return args_creator.get_client_args( service_model, region_name, is_secure, endpoint_url, verify, credentials, scoped_config, client_config, endpoint_bridge) diff --git a/ibm_botocore/configprovider.py b/ibm_botocore/configprovider.py index 31a4eb6..5e21180 100644 --- a/ibm_botocore/configprovider.py +++ b/ibm_botocore/configprovider.py @@ -13,10 +13,15 @@ """This module contains the inteface for controlling how configuration is loaded. """ +import logging import os from ibm_botocore import utils + +logger = logging.getLogger(__name__) + + #: A default dictionary that maps the logical names for session variables #: to the specific environment variables and configuration file names #: that contain the values for these variables. @@ -82,16 +87,53 @@ 'endpoint_discovery_enabled': ( 'endpoint_discovery_enabled', 'AWS_ENDPOINT_DISCOVERY_ENABLED', False, utils.ensure_boolean), + 'sts_regional_endpoints': ( + 'sts_regional_endpoints', 'AWS_STS_REGIONAL_ENDPOINTS', 'legacy', + None + ), + +} +# A mapping for the s3 specific configuration vars. These are the configuration +# vars that typically go in the s3 section of the config file. This mapping +# follows the same schema as the previous session variable mapping. 
+DEFAULT_S3_CONFIG_VARS = { + 'addressing_style': ( + ('s3', 'addressing_style'), None, None, None), + 'use_accelerate_endpoint': ( + ('s3', 'use_accelerate_endpoint'), None, None, utils.ensure_boolean + ), + 'use_dualstack_endpoint': ( + ('s3', 'use_dualstack_endpoint'), None, None, utils.ensure_boolean + ), + 'payload_signing_enabled': ( + ('s3', 'payload_signing_enabled'), None, None, utils.ensure_boolean + ), + 'use_arn_region': ( + ['s3_use_arn_region', + ('s3', 'use_arn_region')], + 'AWS_S3_USE_ARN_REGION', None, utils.ensure_boolean + ) } -def create_botocore_default_config_mapping(chain_builder): +def create_botocore_default_config_mapping(session): + chain_builder = ConfigChainFactory(session=session) + config_mapping = _create_config_chain_mapping( + chain_builder, BOTOCORE_DEFAUT_SESSION_VARIABLES) + config_mapping['s3'] = SectionConfigProvider( + 's3', session, _create_config_chain_mapping( + chain_builder, DEFAULT_S3_CONFIG_VARS) + ) + return config_mapping + + +def _create_config_chain_mapping(chain_builder, config_variables): mapping = {} - for logical_name, config in BOTOCORE_DEFAUT_SESSION_VARIABLES.items(): + for logical_name, config in config_variables.items(): mapping[logical_name] = chain_builder.create_config_chain( instance_name=logical_name, env_var_names=config[1], - config_property_name=config[0], + config_property_names=config[0], default=config[2], conversion_func=config[3] ) @@ -122,7 +164,7 @@ def __init__(self, session, environ=None): self._environ = environ def create_config_chain(self, instance_name=None, env_var_names=None, - config_property_name=None, default=None, + config_property_names=None, default=None, conversion_func=None): """Build a config chain following the standard ibm_botocore pattern. @@ -142,10 +184,11 @@ def create_config_chain(self, instance_name=None, env_var_names=None, search for this value. They are searched in order. If it is None it will not be added to the chain. 
- :type config_property_name: str or None - :param config_property_name: The string name of the key in the config - file for this config option. If it is None it will not be added to - the chain. + :type config_property_names: str/tuple or list of str/tuple or None + :param config_property_names: One or more strings or tuples + representing the name of the key in the config file for this + config option. They are searched in order. If it is None it will + not be added to the chain. :type default: Any :param default: Any constant value to be returned. @@ -169,18 +212,10 @@ ) ) if env_var_names is not None: - providers.append( - EnvironmentProvider( - names=env_var_names, - env=self._environ, - ) - ) - if config_property_name is not None: - providers.append( - ScopedConfigProvider( - config_var_name=config_property_name, - session=self._session, - ) + providers.extend(self._get_env_providers(env_var_names)) + if config_property_names is not None: + providers.extend( + self._get_scoped_config_providers(config_property_names) ) if default is not None: providers.append(ConstantProvider(value=default)) @@ -190,6 +225,29 @@ conversion_func=conversion_func, ) + def _get_env_providers(self, env_var_names): + env_var_providers = [] + if not isinstance(env_var_names, list): + env_var_names = [env_var_names] + for env_var_name in env_var_names: + env_var_providers.append( + EnvironmentProvider(name=env_var_name, env=self._environ) + ) + return env_var_providers + + def _get_scoped_config_providers(self, config_property_names): + scoped_config_providers = [] + if not isinstance(config_property_names, list): + config_property_names = [config_property_names] + for config_property_name in config_property_names: + scoped_config_providers.append( + ScopedConfigProvider( + config_var_name=config_property_name, + session=self._session, + ) + ) + 
return scoped_config_providers + class ConfigValueStore(object): """The ConfigValueStore object stores configuration values.""" @@ -370,9 +428,11 @@ class ScopedConfigProvider(BaseProvider): def __init__(self, config_var_name, session): """Initialize ScopedConfigProvider. - :type config_var_name: str + :type config_var_name: str or tuple :param config_var_name: The name of the config variable to load from - the configuration file. + the configuration file. If the value is a tuple, it must only + consist of two items, where the first item represents the section + and the second item represents the config var name in the section. :type session: :class:`ibm_botocore.session.Session` :param session: The ibm_botocore session to get the loaded configuration @@ -383,9 +443,13 @@ def __init__(self, config_var_name, session): def provide(self): """Provide a value from a config file property.""" - config = self._session.get_scoped_config() - value = config.get(self._config_var_name) - return value + scoped_config = self._session.get_scoped_config() + if isinstance(self._config_var_name, tuple): + section_config = scoped_config.get(self._config_var_name[0]) + if not isinstance(section_config, dict): + return None + return section_config.get(self._config_var_name[1]) + return scoped_config.get(self._config_var_name) def __repr__(self): return 'ScopedConfigProvider(config_var_name=%s, session=%s)' % ( @@ -396,35 +460,66 @@ def __repr__(self): class EnvironmentProvider(BaseProvider): """This class loads config values from environment variables.""" - def __init__(self, names, env): + def __init__(self, name, env): """Initialize with the keys in the dictionary to check. - :type names: str or list - :param names: If this is a str, the key with that name will - be loaded and returned. If this variable is - a list, then it must be a list of str. The same process will be - repeated for each string in the list, the first that returns non - None will be returned. 
+ :type name: str + :param name: The key with that name will be loaded and returned. :type env: dict :param env: Environment variables dictionary to get variables from. """ - self._names = names + self._name = name self._env = env def provide(self): """Provide a config value from a source dictionary.""" - names = self._names - if not isinstance(names, list): - names = [names] - for name in names: - if name in self._env: - return self._env[name] + if self._name in self._env: + return self._env[self._name] return None def __repr__(self): - return 'EnvironmentProvider(names=%s, env=%s)' % (self._names, - self._env) + return 'EnvironmentProvider(name=%s, env=%s)' % (self._name, self._env) + + +class SectionConfigProvider(BaseProvider): + """Provides a dictionary from a section in the scoped config + + This is useful for retrieving scoped config variables (i.e. s3) that have + their own set of config variables and resolving logic. + """ + def __init__(self, section_name, session, override_providers=None): + self._section_name = section_name + self._session = session + self._scoped_config_provider = ScopedConfigProvider( + self._section_name, self._session) + self._override_providers = override_providers + if self._override_providers is None: + self._override_providers = {} + + def provide(self): + section_config = self._scoped_config_provider.provide() + if section_config and not isinstance(section_config, dict): + logger.debug("The %s config key is not a dictionary type, " + "ignoring its value of: %s", self._section_name, + section_config) + return None + for section_config_var, provider in self._override_providers.items(): + provider_val = provider.provide() + if provider_val is not None: + if section_config is None: + section_config = {} + section_config[section_config_var] = provider_val + return section_config + + def __repr__(self): + return ( + 'SectionConfigProvider(section_name=%s, ' + 'session=%s, override_providers=%s)' % ( + self._section_name, 
self._session, + self._override_providers, + ) + ) class ConstantProvider(BaseProvider): diff --git a/ibm_botocore/credentials.py b/ibm_botocore/credentials.py index b7914e0..f0db16b 100755 --- a/ibm_botocore/credentials.py +++ b/ibm_botocore/credentials.py @@ -22,15 +22,16 @@ from collections import namedtuple from copy import deepcopy from hashlib import sha1 -import json from dateutil.parser import parse from dateutil.tz import tzlocal import ibm_botocore.configloader import ibm_botocore.compat +from ibm_botocore import UNSIGNED from ibm_botocore.compat import total_seconds from ibm_botocore.compat import compat_shell_split +from ibm_botocore.config import Config from ibm_botocore.exceptions import UnknownCredentialError from ibm_botocore.exceptions import PartialCredentialsError from ibm_botocore.exceptions import ConfigNotFound @@ -41,6 +42,7 @@ from ibm_botocore.exceptions import CredentialRetrievalError from ibm_botocore.utils import InstanceMetadataFetcher, parse_key_val_file from ibm_botocore.utils import ContainerMetadataFetcher +from ibm_botocore.utils import FileWebIdentityTokenLoader logger = logging.getLogger(__name__) @@ -48,7 +50,7 @@ ['access_key', 'secret_key', 'token']) -def create_credential_resolver(session, cache=None): +def create_credential_resolver(session, cache=None, region_name=None): """Create a default credential resolver. 
This creates a pre-configured credential resolver @@ -62,6 +64,8 @@ def create_credential_resolver(session, cache=None): config_file = session.get_config_variable('config_file') metadata_timeout = session.get_config_variable('metadata_service_timeout') num_attempts = session.get_config_variable('metadata_service_num_attempts') + disable_env_vars = session.instance_variables().get('profile') is not None + if cache is None: cache = {} @@ -75,34 +79,38 @@ def create_credential_resolver(session, cache=None): num_attempts=num_attempts, user_agent=session.user_agent()) ) + + profile_provider_builder = ProfileProviderBuilder( + session, cache=cache, region_name=region_name) assume_role_provider = AssumeRoleProvider( load_config=lambda: session.full_config, - client_creator=session.create_client, + client_creator=_get_client_creator(session, region_name), cache=cache, profile_name=profile_name, credential_sourcer=CanonicalNameCredentialSourcer([ env_provider, container_provider, instance_metadata_provider - ]) + ]), + profile_provider_builder=profile_provider_builder, ) - providers = [ + + pre_profile = [ env_provider, cos_provider, assume_role_provider, - SharedCredentialProvider( - creds_filename=credential_file, - profile_name=profile_name - ), - ProcessProvider(profile_name=profile_name, - load_config=lambda: session.full_config), - # The new config file has precedence over the legacy - # config file. - ConfigProvider(config_filename=config_file, profile_name=profile_name), + ] + profile_providers = profile_provider_builder.providers( + profile_name=profile_name, + disable_env_vars=disable_env_vars, + ) + post_profile = [ OriginalEC2Provider(), BotoProvider(), container_provider, - instance_metadata_provider + instance_metadata_provider, ] - if session.instance_variables().get('profile') is not None: + providers = pre_profile + profile_providers + post_profile + + if disable_env_vars: # An explicitly provided profile will negate an EnvProvider. 
# We will defer to providers that understand the "profile" + # concept to retrieve credentials. @@ -128,6 +136,62 @@ return resolver +class ProfileProviderBuilder(object): + """This class handles the creation of profile based providers. + + NOTE: This class is only intended for internal use. + + This class handles the creation and ordering of the various credential + providers that primarily source their configuration from the shared config. + This is needed to enable sharing between the default credential chain and + the source profile chain created by the assume role provider. + """ + def __init__(self, session, cache=None, region_name=None): + self._session = session + self._cache = cache + self._region_name = region_name + + def providers(self, profile_name, disable_env_vars=False): + return [ + self._create_web_identity_provider( + profile_name, disable_env_vars, + ), + self._create_shared_credential_provider(profile_name), + self._create_process_provider(profile_name), + self._create_config_provider(profile_name), + ] + + def _create_process_provider(self, profile_name): + return ProcessProvider( + profile_name=profile_name, + load_config=lambda: self._session.full_config, + ) + + def _create_shared_credential_provider(self, profile_name): + credential_file = self._session.get_config_variable('credentials_file') + return SharedCredentialProvider( + profile_name=profile_name, + creds_filename=credential_file, + ) + + def _create_config_provider(self, profile_name): + config_file = self._session.get_config_variable('config_file') + return ConfigProvider( + profile_name=profile_name, + config_filename=config_file, + ) + + def _create_web_identity_provider(self, profile_name, disable_env_vars): + return AssumeRoleWithWebIdentityProvider( + load_config=lambda: self._session.full_config, + client_creator=_get_client_creator( + self._session, self._region_name), + cache=self._cache, + profile_name=profile_name, + 
disable_env_vars=disable_env_vars, + ) + + def get_credentials(session): resolver = create_credential_resolver(session) return resolver.load_credentials() @@ -151,6 +215,17 @@ def _serialize_if_needed(value, iso=False): return value +def _get_client_creator(session, region_name): + def client_creator(service_name, **kwargs): + create_client_kwargs = { + 'region_name': region_name + } + create_client_kwargs.update(**kwargs) + return session.create_client(service_name, **create_client_kwargs) + + return client_creator + + def create_assume_role_refresher(client, params): def refresh(): response = client.assume_role(**params) @@ -550,11 +625,15 @@ def refresh_needed(self, refresh_in=None): class CachedCredentialFetcher(object): - def __init__(self, cache=None, expiry_window_seconds=60 * 15): + DEFAULT_EXPIRY_WINDOW_SECONDS = 60 * 15 + + def __init__(self, cache=None, expiry_window_seconds=None): if cache is None: cache = {} self._cache = cache self._cache_key = self._create_cache_key() + if expiry_window_seconds is None: + expiry_window_seconds = self.DEFAULT_EXPIRY_WINDOW_SECONDS self._expiry_window_seconds = expiry_window_seconds def _create_cache_key(self): @@ -614,10 +693,59 @@ def _is_expired(self, credentials): return seconds < self._expiry_window_seconds -class AssumeRoleCredentialFetcher(CachedCredentialFetcher): +class BaseAssumeRoleCredentialFetcher(CachedCredentialFetcher): + def __init__(self, client_creator, role_arn, extra_args=None, + cache=None, expiry_window_seconds=None): + self._client_creator = client_creator + self._role_arn = role_arn + + if extra_args is None: + self._assume_kwargs = {} + else: + self._assume_kwargs = deepcopy(extra_args) + self._assume_kwargs['RoleArn'] = self._role_arn + + self._role_session_name = self._assume_kwargs.get('RoleSessionName') + self._using_default_session_name = False + if not self._role_session_name: + self._generate_assume_role_name() + + super(BaseAssumeRoleCredentialFetcher, self).__init__( + cache, 
expiry_window_seconds + ) + + def _generate_assume_role_name(self): + self._role_session_name = 'botocore-session-%s' % (int(time.time())) + self._assume_kwargs['RoleSessionName'] = self._role_session_name + self._using_default_session_name = True + + def _create_cache_key(self): + """Create a predictable cache key for the current configuration. + + The cache key is intended to be compatible with file names. + """ + args = deepcopy(self._assume_kwargs) + + # The role session name gets randomly generated, so we don't want it + # in the hash. + if self._using_default_session_name: + del args['RoleSessionName'] + + if 'Policy' in args: + # To have a predictable hash, the keys of the policy must be + # sorted, so we have to load it here to make sure it gets sorted + # later on. + args['Policy'] = json.loads(args['Policy']) + + args = json.dumps(args, sort_keys=True) + argument_hash = sha1(args.encode('utf-8')).hexdigest() + return self._make_file_safe(argument_hash) + + +class AssumeRoleCredentialFetcher(BaseAssumeRoleCredentialFetcher): def __init__(self, client_creator, source_credentials, role_arn, extra_args=None, mfa_prompter=None, cache=None, - expiry_window_seconds=60 * 15): + expiry_window_seconds=None): """ :type client_creator: callable :param client_creator: A callable that creates a client taking @@ -649,54 +777,16 @@ def __init__(self, client_creator, source_credentials, role_arn, :type expiry_window_seconds: int :param expiry_window_seconds: The amount of time, in seconds, """ - self._client_creator = client_creator self._source_credentials = source_credentials - self._role_arn = role_arn - - if extra_args is None: - self._assume_kwargs = {} - else: - self._assume_kwargs = deepcopy(extra_args) - self._assume_kwargs['RoleArn'] = self._role_arn - - self._role_session_name = self._assume_kwargs.get('RoleSessionName') - self._using_default_session_name = False - if not self._role_session_name: - self._role_session_name = 'botocore-session-%s' % ( - 
int(time.time())) - self._assume_kwargs['RoleSessionName'] = self._role_session_name - self._using_default_session_name = True - self._mfa_prompter = mfa_prompter if self._mfa_prompter is None: self._mfa_prompter = getpass.getpass super(AssumeRoleCredentialFetcher, self).__init__( - cache, expiry_window_seconds + client_creator, role_arn, extra_args=extra_args, + cache=cache, expiry_window_seconds=expiry_window_seconds ) - def _create_cache_key(self): - """Create a predictable cache key for the current configuration. - - The cache key is intended to be compatible with file names. - """ - args = deepcopy(self._assume_kwargs) - - # The role session name gets randomly generated, so we don't want it - # in the hash. - if self._using_default_session_name: - del args['RoleSessionName'] - - if 'Policy' in args: - # To have a predictable hash, the keys of the policy must be - # sorted, so we have to load it here to make sure it gets sorted - # later on. - args['Policy'] = json.loads(args['Policy']) - - args = json.dumps(args, sort_keys=True) - argument_hash = sha1(args.encode('utf-8')).hexdigest() - return self._make_file_safe(argument_hash) - def _get_credentials(self): """Get credentials by calling assume role.""" kwargs = self._assume_role_kwargs() @@ -732,6 +822,63 @@ def _create_client(self): ) +class AssumeRoleWithWebIdentityCredentialFetcher( + BaseAssumeRoleCredentialFetcher +): + def __init__(self, client_creator, web_identity_token_loader, role_arn, + extra_args=None, cache=None, expiry_window_seconds=None): + """ + :type client_creator: callable + :param client_creator: A callable that creates a client taking + arguments like ``Session.create_client``. + + :type web_identity_token_loader: callable + :param web_identity_token_loader: A callable that takes no arguments + and returns a web identity token str. + + :type role_arn: str + :param role_arn: The ARN of the role to be assumed. 
+ + :type extra_args: dict + :param extra_args: Any additional arguments to add to the assume + role request using the format of the ibm_botocore operation. + Possible keys include, but may not be limited to, + DurationSeconds, Policy, SerialNumber, ExternalId and + RoleSessionName. + + :type cache: dict + :param cache: An object that supports ``__getitem__``, + ``__setitem__``, and ``__contains__``. An example of this is + the ``JSONFileCache`` class in aws-cli. + + :type expiry_window_seconds: int + :param expiry_window_seconds: The amount of time, in seconds, + """ + self._web_identity_token_loader = web_identity_token_loader + + super(AssumeRoleWithWebIdentityCredentialFetcher, self).__init__( + client_creator, role_arn, extra_args=extra_args, + cache=cache, expiry_window_seconds=expiry_window_seconds + ) + + def _get_credentials(self): + """Get credentials by calling assume role.""" + kwargs = self._assume_role_kwargs() + # Assume role with web identity does not require credentials other than + # the token, explicitly configure the client to not sign requests. + config = Config(signature_version=UNSIGNED) + client = self._client_creator('sts', config=config) + return client.assume_role_with_web_identity(**kwargs) + + def _assume_role_kwargs(self): + """Get the arguments for assume role based on current configuration.""" + assume_role_kwargs = deepcopy(self._assume_kwargs) + identity_token = self._web_identity_token_loader() + assume_role_kwargs['WebIdentityToken'] = identity_token + + return assume_role_kwargs + + class CredentialProvider(object): # A short name to identify the provider within ibm_botocore. 
METHOD = None @@ -935,17 +1082,19 @@ def _build_mapping(self, mapping): 'token', self.TOKENS) if not isinstance(var_mapping['token'], list): var_mapping['token'] = [var_mapping['token']] - var_mapping['expiry_time'] = mapping.get( 'expiry_time', self.EXPIRY_TIME) - return var_mapping def load(self): """ Search for credentials in explicit environment variables. """ - if self._mapping['ibm_api_key_id'] in self.environ: + + access_key = self.environ.get(self._mapping['access_key'], '') + ibm_api_key_id = self.environ.get(self._mapping['ibm_api_key_id'], '') + + if ibm_api_key_id: logger.info('Found IBM credentials in environment variables.') ibm_api_key_id, ibm_service_instance_id, ibm_auth_endpoint = self._extract_creds_from_mapping( self.environ, self._mapping['ibm_api_key_id'], @@ -956,7 +1105,7 @@ def load(self): service_instance_id=ibm_service_instance_id, auth_endpoint=ibm_auth_endpoint, method=self.METHOD) - elif self._mapping['access_key'] in self.environ: + elif access_key: logger.info('Found credentials in environment variables.') fetcher = self._create_credentials_fetcher() credentials = fetcher(require_expiry=False) @@ -985,30 +1134,32 @@ def _create_credentials_fetcher(self): def fetch_credentials(require_expiry=True): credentials = {} - access_key = environ.get(mapping['access_key']) - if access_key is None: + access_key = environ.get(mapping['access_key'], '') + if not access_key: raise PartialCredentialsError( provider=method, cred_var=mapping['access_key']) credentials['access_key'] = access_key - secret_key = environ.get(mapping['secret_key']) - if secret_key is None: + secret_key = environ.get(mapping['secret_key'], '') + if not secret_key: raise PartialCredentialsError( provider=method, cred_var=mapping['secret_key']) credentials['secret_key'] = secret_key - token = None + credentials['token'] = None for token_env_var in mapping['token']: - if token_env_var in environ: - token = environ[token_env_var] + token = environ.get(token_env_var, '') + if 
token: + credentials['token'] = token break - credentials['token'] = token - expiry_time = environ.get(mapping['expiry_time']) - if require_expiry and expiry_time is None: + credentials['expiry_time'] = None + expiry_time = environ.get(mapping['expiry_time'], '') + if expiry_time: + credentials['expiry_time'] = expiry_time + if require_expiry and not expiry_time: raise PartialCredentialsError( provider=method, cred_var=mapping['expiry_time']) - credentials['expiry_time'] = expiry_time return credentials @@ -1122,6 +1273,7 @@ def _get_session_token(self, config): class ConfigProvider(CredentialProvider): """INI based config provider with profile sections.""" METHOD = 'config-file' + CANONICAL_NAME = 'SharedConfig' ACCESS_KEY = 'aws_access_key_id' SECRET_KEY = 'aws_secret_access_key' @@ -1251,13 +1403,15 @@ class AssumeRoleProvider(CredentialProvider): # provider as much as possible. CANONICAL_NAME = None ROLE_CONFIG_VAR = 'role_arn' + WEB_IDENTITY_TOKE_FILE_VAR = 'web_identity_token_file' # Credentials are considered expired (and will be refreshed) once the total # remaining time left until the credentials expires is less than the # EXPIRY_WINDOW. EXPIRY_WINDOW_SECONDS = 60 * 15 def __init__(self, load_config, client_creator, cache, profile_name, - prompter=getpass.getpass, credential_sourcer=None): + prompter=getpass.getpass, credential_sourcer=None, + profile_provider_builder=None): """ :type load_config: callable :param load_config: A function that accepts no arguments, and @@ -1307,6 +1461,7 @@ def __init__(self, load_config, client_creator, cache, profile_name, # instantiated). 
self._loaded_config = {} self._credential_sourcer = credential_sourcer + self._profile_provider_builder = profile_provider_builder self._visited_profiles = [self._profile_name] def load(self): @@ -1317,7 +1472,14 @@ def load(self): return self._load_creds_via_assume_role(self._profile_name) def _has_assume_role_config_vars(self, profile): - return self.ROLE_CONFIG_VAR in profile + return ( + self.ROLE_CONFIG_VAR in profile and + # We need to ensure this provider doesn't look at a profile when + # the profile has configuration for web identity. Simply relying on + # the order in the credential chain is insufficient as it doesn't + # prevent the case when we're doing an assume role chain. + self.WEB_IDENTITY_TOKE_FILE_VAR not in profile + ) def _load_creds_via_assume_role(self, profile_name): role_config = self._get_role_config(profile_name) @@ -1446,16 +1608,6 @@ def _validate_source_profile(self, parent_profile_name, source_profile = profiles[source_profile_name] - # Ensure the profile has valid credential type - if not self._source_profile_has_credentials(source_profile): - raise InvalidConfigError( - error_msg=( - 'The source_profile "%s" must specify either static ' - 'credentials or an assume role configuration' % ( - source_profile_name) - ) - ) - # Make sure we aren't going into an infinite loop. If we haven't # visited the profile yet, we're good. if source_profile_name not in self._visited_profiles: @@ -1499,8 +1651,29 @@ def _resolve_credentials_from_profile(self, profile_name): profiles = self._loaded_config.get('profiles', {}) profile = profiles[profile_name] - if self._has_static_credentials(profile): + if self._has_static_credentials(profile) and \ + not self._profile_provider_builder: + # This is only here for backwards compatibility. If this provider + # isn't given a profile provider builder we still want to be able + # handle the basic static credential case as we would before the + # provile provider builder parameter was added. 
return self._resolve_static_credentials_from_profile(profile) + elif self._has_static_credentials(profile) or \ + not self._has_assume_role_config_vars(profile): + profile_providers = self._profile_provider_builder.providers( + profile_name=profile_name, + disable_env_vars=True, + ) + profile_chain = CredentialResolver(profile_providers) + credentials = profile_chain.load_credentials() + if credentials is None: + error_message = ( + 'The source profile "%s" must have credentials.' + ) + raise InvalidConfigError( + error_msg=error_message % profile_name, + ) + return credentials return self._load_creds_via_assume_role(profile_name) @@ -1530,6 +1703,95 @@ def _resolve_credentials_from_source(self, credential_source, return credentials +class AssumeRoleWithWebIdentityProvider(CredentialProvider): + METHOD = 'assume-role-with-web-identity' + CANONICAL_NAME = None + _CONFIG_TO_ENV_VAR = { + 'web_identity_token_file': 'AWS_WEB_IDENTITY_TOKEN_FILE', + 'role_session_name': 'AWS_ROLE_SESSION_NAME', + 'role_arn': 'AWS_ROLE_ARN', + } + + def __init__( + self, + load_config, + client_creator, + profile_name, + cache=None, + disable_env_vars=False, + token_loader_cls=None, + ): + self.cache = cache + self._load_config = load_config + self._client_creator = client_creator + self._profile_name = profile_name + self._profile_config = None + self._disable_env_vars = disable_env_vars + if token_loader_cls is None: + token_loader_cls = FileWebIdentityTokenLoader + self._token_loader_cls = token_loader_cls + + def load(self): + return self._assume_role_with_web_identity() + + def _get_profile_config(self, key): + if self._profile_config is None: + loaded_config = self._load_config() + profiles = loaded_config.get('profiles', {}) + self._profile_config = profiles.get(self._profile_name, {}) + return self._profile_config.get(key) + + def _get_env_config(self, key): + if self._disable_env_vars: + return None + env_key = self._CONFIG_TO_ENV_VAR.get(key) + if env_key and env_key in 
os.environ: + return os.environ[env_key] + return None + + def _get_config(self, key): + env_value = self._get_env_config(key) + if env_value is not None: + return env_value + return self._get_profile_config(key) + + def _assume_role_with_web_identity(self): + token_path = self._get_config('web_identity_token_file') + if not token_path: + return None + token_loader = self._token_loader_cls(token_path) + + role_arn = self._get_config('role_arn') + if not role_arn: + error_msg = ( + 'The provided profile or the current environment is ' + 'configured to assume role with web identity but has no ' + 'role ARN configured. Ensure that the profile has the role_arn' + 'configuration set or the AWS_ROLE_ARN env var is set.' + ) + raise InvalidConfigError(error_msg=error_msg) + + extra_args = {} + role_session_name = self._get_config('role_session_name') + if role_session_name is not None: + extra_args['RoleSessionName'] = role_session_name + + fetcher = AssumeRoleWithWebIdentityCredentialFetcher( + client_creator=self._client_creator, + web_identity_token_loader=token_loader, + role_arn=role_arn, + extra_args=extra_args, + cache=self.cache, + ) + # The initial credentials are empty and the expiration time is set + # to now so that we can delay the call to assume role until it is + # strictly needed. + return DeferredRefreshableCredentials( + method=self.METHOD, + refresh_using=fetcher.fetch_credentials, + ) + + class CanonicalNameCredentialSourcer(object): def __init__(self, providers): self._providers = providers diff --git a/ibm_botocore/data/s3/2006-03-01/service-2.json b/ibm_botocore/data/s3/2006-03-01/service-2.json index f3dc5f7..4e74fdd 100755 --- a/ibm_botocore/data/s3/2006-03-01/service-2.json +++ b/ibm_botocore/data/s3/2006-03-01/service-2.json @@ -167,6 +167,17 @@ "input":{"shape":"DeleteBucketReplicationRequest"}, "documentation":"Deletes the replication configuration from the bucket." 
}, + "DeleteBucketTagging":{ + "name":"DeleteBucketTagging", + "http":{ + "method":"DELETE", + "requestUri":"/{Bucket}?tagging", + "responseCode":204 + }, + "input":{"shape":"DeleteBucketTaggingRequest"}, + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketDELETEtagging.html", + "documentation":"

Deletes the tags from the bucket.

" + }, "DeleteBucketWebsite":{ "name":"DeleteBucketWebsite", "http":{ @@ -332,6 +343,28 @@ "output":{"shape":"GetBucketReplicationOutput"}, "documentation":"Returns the replication configuration of a bucket." }, + "GetBucketTagging":{ + "name":"GetBucketTagging", + "http":{ + "method":"GET", + "requestUri":"/{Bucket}?tagging" + }, + "input":{"shape":"GetBucketTaggingRequest"}, + "output":{"shape":"GetBucketTaggingOutput"}, + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETtagging.html", + "documentation":"

Returns the tag set associated with the bucket.

" + }, + "GetBucketVersioning":{ + "name":"GetBucketVersioning", + "http":{ + "method":"GET", + "requestUri":"/{Bucket}?versioning" + }, + "input":{"shape":"GetBucketVersioningRequest"}, + "output":{"shape":"GetBucketVersioningOutput"}, + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETversioningStatus.html", + "documentation":"

Returns the versioning state of a bucket.

" + }, "GetObject":{ "name":"GetObject", "http":{ @@ -473,6 +506,18 @@ "__documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/mpUploadListMPUpload.html", "documentation":"This operation lists in-progress multipart uploads." }, + "ListObjectVersions":{ + "name":"ListObjectVersions", + "http":{ + "method":"GET", + "requestUri":"/{Bucket}?versions" + }, + "input":{"shape":"ListObjectVersionsRequest"}, + "output":{"shape":"ListObjectVersionsOutput"}, + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETVersion.html", + "documentation":"

Returns metadata about all of the versions of objects in a bucket.

", + "alias":"GetBucketObjectVersions" + }, "ListObjects":{ "name":"ListObjects", "http":{ @@ -488,6 +533,19 @@ "documentation":"Returns some or all (up to 1000) of the objects in a bucket. You can use the request parameters as selection criteria to return a subset of the objects in a bucket.", "alias":"GetBucket" }, + "ListObjectVersions":{ + "name":"ListObjectVersions", + "http":{ + "method":"GET", + "requestUri":"/{Bucket}?versions" + }, + "input":{"shape":"ListObjectVersionsRequest"}, + "output":{"shape":"ListObjectVersionsOutput"}, + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETVersion.html", + "documentation":"

Returns metadata about all of the versions of objects in a bucket.

", + "alias":"GetBucketObjectVersions" + }, + "ListObjectsV2":{ "name":"ListObjectsV2", "http":{ @@ -595,6 +653,26 @@ "input":{"shape":"PutBucketReplicationRequest"}, "documentation":"Creates a new replication configuration (or replaces an existing one, if present)." }, + "PutBucketTagging":{ + "name":"PutBucketTagging", + "http":{ + "method":"PUT", + "requestUri":"/{Bucket}?tagging" + }, + "input":{"shape":"PutBucketTaggingRequest"}, + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTtagging.html", + "documentation":"

Sets the tags for a bucket.

" + }, + "PutBucketVersioning":{ + "name":"PutBucketVersioning", + "http":{ + "method":"PUT", + "requestUri":"/{Bucket}?versioning" + }, + "input":{"shape":"PutBucketVersioningRequest"}, + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTVersioningStatus.html", + "documentation":"

Sets the versioning state of an existing bucket. To set the versioning state, you must be the bucket owner.

" + }, "PutObject":{ "name":"PutObject", "http":{ @@ -1018,6 +1096,13 @@ "Retention" ] }, + "BucketVersioningStatus":{ + "type":"string", + "enum":[ + "Enabled", + "Suspended" + ] + }, "Buckets":{ "type":"list", "member":{ @@ -1134,6 +1219,12 @@ "location":"header", "locationName":"x-amz-server-side-encryption" }, + "VersionId":{ + "shape":"ObjectVersionId", + "documentation":"

Version of the object.

", + "location":"header", + "locationName":"x-amz-version-id" + }, "SSEKMSKeyId":{ "shape":"SSEKMSKeyId", "documentation":"Not returned by COS.", @@ -1259,6 +1350,18 @@ "location":"header", "locationName":"x-amz-expiration" }, + "CopySourceVersionId":{ + "shape":"CopySourceVersionId", + "documentation":"

", + "location":"header", + "locationName":"x-amz-copy-source-version-id" + }, + "VersionId":{ + "shape":"ObjectVersionId", + "documentation":"

Version ID of the newly created copy.

", + "location":"header", + "locationName":"x-amz-version-id" + }, "ServerSideEncryption":{ "shape":"ServerSideEncryption", "documentation":"The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256).", @@ -1544,6 +1647,7 @@ "sensitive":true }, "CopySourceSSECustomerKeyMD5":{"type":"string"}, + "CopySourceVersionId":{"type":"string"}, "CreateBucketConfiguration":{ "type":"structure", "members":{ @@ -1971,6 +2075,18 @@ } } }, + "DeleteBucketTaggingRequest":{ + "type":"structure", + "required":["Bucket"], + "members":{ + "Bucket":{ + "shape":"BucketName", + "documentation":"

", + "location":"uri", + "locationName":"Bucket" + } + } + }, "DeleteBucketWebsiteRequest":{ "type":"structure", "required":["Bucket"], @@ -2046,6 +2162,12 @@ "location":"header", "locationName":"x-amz-mfa" }, + "VersionId":{ + "shape":"ObjectVersionId", + "documentation":"

VersionId used to reference a specific version of the object.

", + "location":"querystring", + "locationName":"versionId" + }, "RequestPayer":{ "shape":"RequestPayer", "documentation":"Ignored by COS if present.", @@ -2384,6 +2506,11 @@ "shape":"BucketName", "location":"uri", "locationName":"Bucket" + }, + "MirrorDestination":{ + "shape":"MirrorDestination", + "location":"header", + "locationName":"Mirror-Destination" } } }, @@ -2572,6 +2699,59 @@ } } }, + "GetBucketTaggingOutput":{ + "type":"structure", + "required":["TagSet"], + "members":{ + "TagSet":{ + "shape":"TagSet", + "documentation":"

" + } + } + }, + "GetBucketTaggingRequest":{ + "type":"structure", + "required":["Bucket"], + "members":{ + "Bucket":{ + "shape":"BucketName", + "documentation":"

", + "location":"uri", + "locationName":"Bucket" + }, + "MirrorDestination":{ + "shape":"MirrorDestination", + "location":"header", + "locationName":"Mirror-Destination" + } + } + }, + "GetBucketVersioningOutput":{ + "type":"structure", + "members":{ + "Status":{ + "shape":"BucketVersioningStatus", + "documentation":"

The versioning state of the bucket.

" + }, + "MFADelete":{ + "shape":"MFADeleteStatus", + "documentation":"

Specifies whether MFA delete is enabled in the bucket versioning configuration. This element is only returned if the bucket has been configured with MFA delete. If the bucket has never been so configured, this element is not returned.

", + "locationName":"MfaDelete" + } + } + }, + "GetBucketVersioningRequest":{ + "type":"structure", + "required":["Bucket"], + "members":{ + "Bucket":{ + "shape":"BucketName", + "documentation":"

", + "location":"uri", + "locationName":"Bucket" + } + } + }, "GetBucketWebsiteOutput":{ "type":"structure", "members":{ @@ -2625,6 +2805,17 @@ "location":"uri", "locationName":"Key" }, + "MirrorDestination":{ + "shape":"MirrorDestination", + "location":"header", + "locationName":"Mirror-Destination" + }, + "VersionId":{ + "shape":"ObjectVersionId", + "documentation":"

VersionId used to reference a specific version of the object.

", + "location":"querystring", + "locationName":"versionId" + }, "RequestPayer":{ "shape":"RequestPayer", "documentation":"Ignored by COS if present.", @@ -2688,6 +2879,12 @@ "location":"header", "locationName":"x-amz-missing-meta" }, + "VersionId":{ + "shape":"ObjectVersionId", + "documentation":"

Version of the object.

", + "location":"header", + "locationName":"x-amz-version-id" + }, "CacheControl":{ "shape":"CacheControl", "documentation":"Specifies caching behavior along the request/reply chain.", @@ -2849,6 +3046,11 @@ "location":"uri", "locationName":"Key" }, + "MirrorDestination":{ + "shape":"MirrorDestination", + "location":"header", + "locationName":"Mirror-Destination" + }, "Range":{ "shape":"Range", "documentation":"Downloads the specified range bytes of an object. For more information about the HTTP Range header, go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.", @@ -2891,6 +3093,12 @@ "location":"querystring", "locationName":"response-expires" }, + "VersionId":{ + "shape":"ObjectVersionId", + "documentation":"

VersionId used to reference a specific version of the object.

", + "location":"querystring", + "locationName":"versionId" + }, "SSECustomerAlgorithm":{ "shape":"SSECustomerAlgorithm", "documentation":"Specifies the algorithm to use to when encrypting the object (e.g., AES256).", @@ -3118,6 +3326,12 @@ "location":"header", "locationName":"x-amz-missing-meta" }, + "VersionId":{ + "shape":"ObjectVersionId", + "documentation":"

Version of the object.

", + "location":"header", + "locationName":"x-amz-version-id" + }, "CacheControl":{ "shape":"CacheControl", "documentation":"Specifies caching behavior along the request/reply chain.", @@ -3272,12 +3486,23 @@ "location":"uri", "locationName":"Key" }, + "MirrorDestination":{ + "shape":"MirrorDestination", + "location":"header", + "locationName":"Mirror-Destination" + }, "Range":{ "shape":"Range", "documentation":"Downloads the specified range bytes of an object. For more information about the HTTP Range header, go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.", "location":"header", "locationName":"Range" }, + "VersionId":{ + "shape":"ObjectVersionId", + "documentation":"

VersionId used to reference a specific version of the object.

", + "location":"querystring", + "locationName":"versionId" + }, "SSECustomerAlgorithm":{ "shape":"SSECustomerAlgorithm", "documentation":"Specifies the algorithm to use to when encrypting the object (e.g., AES256).", @@ -3859,6 +4084,11 @@ "shape":"ObjectKey", "location":"uri", "locationName":"Key" + }, + "MirrorDestination":{ + "shape":"MirrorDestination", + "location":"header", + "locationName":"Mirror-Destination" } } }, @@ -3894,6 +4124,11 @@ "location":"querystring", "locationName":"max-uploads" }, + "MirrorDestination":{ + "shape":"MirrorDestination", + "location":"header", + "locationName":"Mirror-Destination" + }, "Prefix":{ "shape":"Prefix", "documentation":"Lists in-progress uploads only for those keys that begin with the specified prefix.", @@ -3908,6 +4143,112 @@ } } }, + "ListObjectVersionsOutput":{ + "type":"structure", + "members":{ + "IsTruncated":{ + "shape":"IsTruncated", + "documentation":"

A flag that indicates whether or not Amazon S3 returned all of the results that satisfied the search criteria. If your results were truncated, you can make a follow-up paginated request using the NextKeyMarker and NextVersionIdMarker response parameters as a starting place in another request to return the rest of the results.

" + }, + "KeyMarker":{ + "shape":"KeyMarker", + "documentation":"

Marks the last Key returned in a truncated response.

" + }, + "VersionIdMarker":{ + "shape":"VersionIdMarker", + "documentation":"

" + }, + "NextKeyMarker":{ + "shape":"NextKeyMarker", + "documentation":"

Use this value for the key marker request parameter in a subsequent request.

" + }, + "NextVersionIdMarker":{ + "shape":"NextVersionIdMarker", + "documentation":"

Use this value for the next version id marker parameter in a subsequent request.

" + }, + "Versions":{ + "shape":"ObjectVersionList", + "documentation":"

", + "locationName":"Version" + }, + "DeleteMarkers":{ + "shape":"DeleteMarkers", + "documentation":"

", + "locationName":"DeleteMarker" + }, + "Name":{ + "shape":"BucketName", + "documentation":"

" + }, + "Prefix":{ + "shape":"Prefix", + "documentation":"

" + }, + "Delimiter":{ + "shape":"Delimiter", + "documentation":"

" + }, + "MaxKeys":{ + "shape":"MaxKeys", + "documentation":"

" + }, + "CommonPrefixes":{ + "shape":"CommonPrefixList", + "documentation":"

" + }, + "EncodingType":{ + "shape":"EncodingType", + "documentation":"

Encoding type used by Amazon S3 to encode object keys in the response.

" + } + } + }, + "ListObjectVersionsRequest":{ + "type":"structure", + "required":["Bucket"], + "members":{ + "Bucket":{ + "shape":"BucketName", + "documentation":"

", + "location":"uri", + "locationName":"Bucket" + }, + "Delimiter":{ + "shape":"Delimiter", + "documentation":"

A delimiter is a character you use to group keys.

", + "location":"querystring", + "locationName":"delimiter" + }, + "EncodingType":{ + "shape":"EncodingType", + "location":"querystring", + "locationName":"encoding-type" + }, + "KeyMarker":{ + "shape":"KeyMarker", + "documentation":"

Specifies the key to start with when listing objects in a bucket.

", + "location":"querystring", + "locationName":"key-marker" + }, + "MaxKeys":{ + "shape":"MaxKeys", + "documentation":"

Sets the maximum number of keys returned in the response. The response might contain fewer keys but will never contain more.

", + "location":"querystring", + "locationName":"max-keys" + }, + "Prefix":{ + "shape":"Prefix", + "documentation":"

Limits the response to keys that begin with the specified prefix.

", + "location":"querystring", + "locationName":"prefix" + }, + "VersionIdMarker":{ + "shape":"VersionIdMarker", + "documentation":"

Specifies the object version you want to start listing from.

", + "location":"querystring", + "locationName":"version-id-marker" + } + } + }, "ListObjectsOutput":{ "type":"structure", "required":[], @@ -3977,6 +4318,11 @@ "location":"querystring", "locationName":"max-keys" }, + "MirrorDestination":{ + "shape":"MirrorDestination", + "location":"header", + "locationName":"Mirror-Destination" + }, "Prefix":{ "shape":"Prefix", "documentation":"Limits the response to keys that begin with the specified prefix.", @@ -4185,6 +4531,11 @@ "location":"querystring", "locationName":"max-parts" }, + "MirrorDestination":{ + "shape":"MirrorDestination", + "location":"header", + "locationName":"Mirror-Destination" + }, "PartNumberMarker":{ "shape":"PartNumberMarker", "documentation":"Specifies the part after which listing should begin. Only parts with higher part numbers will be listed.", @@ -4301,6 +4652,10 @@ } }, "MetricsId":{"type":"string"}, + "MirrorDestination":{ + "type":"string", + "documentation":"Mirror source for object reads." + }, "MissingMeta":{"type":"integer"}, "MultipartUpload":{ "type":"structure", @@ -4347,6 +4702,7 @@ "NextPartNumberMarker":{"type":"integer"}, "NextToken":{"type":"string"}, "NextUploadIdMarker":{"type":"string"}, + "NextVersionIdMarker":{"type":"string"}, "NoSuchBucket":{ "type":"structure", "members":{ @@ -4448,6 +4804,10 @@ "Key":{ "shape":"ObjectKey", "documentation":"Key name of the object to delete." + }, + "VersionId":{ + "shape":"ObjectVersionId", + "documentation":"

VersionId for the specific version of the object to delete.

" } } }, @@ -4480,6 +4840,54 @@ "GLACIER" ] }, + "ObjectVersion":{ + "type":"structure", + "members":{ + "ETag":{ + "shape":"ETag", + "documentation":"

" + }, + "Size":{ + "shape":"Size", + "documentation":"

Size in bytes of the object.

" + }, + "StorageClass":{ + "shape":"ObjectVersionStorageClass", + "documentation":"

The class of storage used to store the object.

" + }, + "Key":{ + "shape":"ObjectKey", + "documentation":"

The object key.

" + }, + "VersionId":{ + "shape":"ObjectVersionId", + "documentation":"

Version ID of an object.

" + }, + "IsLatest":{ + "shape":"IsLatest", + "documentation":"

Specifies whether the object is (true) or is not (false) the latest version of an object.

" + }, + "LastModified":{ + "shape":"LastModified", + "documentation":"

Date and time the object was last modified.

" + }, + "Owner":{ + "shape":"Owner", + "documentation":"

" + } + }, + "documentation":"

" + }, + "ObjectVersionId":{"type":"string"}, + "ObjectVersionList":{ + "type":"list", + "member":{"shape":"ObjectVersion"}, + "flattened":true + }, + "ObjectVersionStorageClass":{ + "type":"string", + "enum":["STANDARD"] + }, "Owner":{ "type":"structure", "members":{ @@ -4537,12 +4945,7 @@ "Prefix":{"type":"string"}, "ProtectionConfiguration":{ "type":"structure", - "required":[ - "Status", - "MinimumRetention", - "DefaultRetention", - "MaximumRetention" - ], + "required":[], "members":{ "Status":{ "shape":"BucketProtectionStatus", @@ -4827,6 +5230,68 @@ }, "payload":"ReplicationConfiguration" }, + "PutBucketTaggingRequest":{ + "type":"structure", + "required":[ + "Bucket", + "Tagging" + ], + "members":{ + "Bucket":{ + "shape":"BucketName", + "documentation":"

", + "location":"uri", + "locationName":"Bucket" + }, + "ContentMD5":{ + "shape":"ContentMD5", + "documentation":"

", + "location":"header", + "locationName":"Content-MD5" + }, + "Tagging":{ + "shape":"Tagging", + "documentation":"

", + "locationName":"Tagging", + "xmlNamespace":{"uri":"http://s3.amazonaws.com/doc/2006-03-01/"} + } + }, + "payload":"Tagging" + }, + "PutBucketVersioningRequest":{ + "type":"structure", + "required":[ + "Bucket", + "VersioningConfiguration" + ], + "members":{ + "Bucket":{ + "shape":"BucketName", + "documentation":"

", + "location":"uri", + "locationName":"Bucket" + }, + "ContentMD5":{ + "shape":"ContentMD5", + "documentation":"

", + "location":"header", + "locationName":"Content-MD5" + }, + "MFA":{ + "shape":"MFA", + "documentation":"

The concatenation of the authentication device's serial number, a space, and the value that is displayed on your authentication device.

", + "location":"header", + "locationName":"x-amz-mfa" + }, + "VersioningConfiguration":{ + "shape":"VersioningConfiguration", + "documentation":"

", + "locationName":"VersioningConfiguration", + "xmlNamespace":{"uri":"http://s3.amazonaws.com/doc/2006-03-01/"} + } + }, + "payload":"VersioningConfiguration" + }, "PutBucketWebsiteRequest":{ "type":"structure", "required":[ @@ -4930,6 +5395,12 @@ "documentation":"Ignored by COS if present.", "location":"header", "locationName":"x-amz-request-payer" + }, + "VersionId":{ + "shape":"ObjectVersionId", + "documentation":"

VersionId used to reference a specific version of the object.

", + "location":"querystring", + "locationName":"versionId" } }, "payload":"AccessControlPolicy" @@ -4955,6 +5426,12 @@ "location":"header", "locationName":"x-amz-server-side-encryption" }, + "VersionId":{ + "shape":"ObjectVersionId", + "documentation":"

Version of the object.

", + "location":"header", + "locationName":"x-amz-version-id" + }, "SSECustomerAlgorithm":{ "shape":"SSECustomerAlgorithm", "documentation":"If server-side encryption with a customer-provided encryption key was requested, the response will include this header confirming the encryption algorithm used.", @@ -5516,6 +5993,24 @@ } } }, + "TagSet":{ + "type":"list", + "member":{ + "shape":"Tag", + "locationName":"Tag" + } + }, + "Tagging":{ + "type":"structure", + "required":["TagSet"], + "members":{ + "TagSet":{ + "shape":"TagSet", + "documentation":"

" + } + }, + "documentation":"

" + }, "TargetBucket":{"type":"string"}, "TargetGrant":{ "type":"structure", @@ -5629,6 +6124,12 @@ "UploadPartCopyOutput":{ "type":"structure", "members":{ + "CopySourceVersionId":{ + "shape":"CopySourceVersionId", + "documentation":"

The version of the source object that was copied, if you have enabled versioning on the source bucket.

", + "location":"header", + "locationName":"x-amz-copy-source-version-id" + }, "CopyPartResult":{"shape":"CopyPartResult"}, "ServerSideEncryption":{ "shape":"ServerSideEncryption", @@ -5892,6 +6393,22 @@ "payload":"Body" }, "Value":{"type":"string"}, + "VersionIdMarker":{"type":"string"}, + "VersioningConfiguration":{ + "type":"structure", + "members":{ + "MFADelete":{ + "shape":"MFADelete", + "documentation":"

Specifies whether MFA delete is enabled in the bucket versioning configuration. This element is only returned if the bucket has been configured with MFA delete. If the bucket has never been so configured, this element is not returned.

", + "locationName":"MfaDelete" + }, + "Status":{ + "shape":"BucketVersioningStatus", + "documentation":"

The versioning state of the bucket.

" + } + }, + "documentation":"

Describes the versioning state of an Amazon S3 bucket. For more information, see PUT Bucket versioning in the Amazon Simple Storage Service API Reference.

" + }, "WebsiteConfiguration":{ "type":"structure", "members":{ diff --git a/ibm_botocore/endpoint.py b/ibm_botocore/endpoint.py index 953b2c9..b1c8c59 100755 --- a/ibm_botocore/endpoint.py +++ b/ibm_botocore/endpoint.py @@ -41,7 +41,7 @@ def convert_to_response_dict(http_response, operation_model): This converts the requests library's HTTP response object to a dictionary. - :type http_response: requests.model.Response + :type http_response: ibm_botocore.vendored.requests.model.Response :param http_response: The HTTP response from an AWS service request. :rtype: dict diff --git a/ibm_botocore/exceptions.py b/ibm_botocore/exceptions.py index 7297374..6845464 100755 --- a/ibm_botocore/exceptions.py +++ b/ibm_botocore/exceptions.py @@ -454,6 +454,21 @@ class InvalidS3AddressingStyleError(BotoCoreError): ) +class UnsupportedS3ArnError(BotoCoreError): + """Error when S3 arn provided to Bucket parameter is not supported""" + fmt = ( + 'S3 ARN {arn} provided to "Bucket" parameter is invalid. Only ' + 'ARNs for S3 access-points are supported.' + ) + + +class UnsupportedS3AccesspointConfigurationError(BotoCoreError): + """Error when an unsupported configuration is used with access-points""" + fmt = ( + 'Unsupported configuration when using S3 access-points: {msg}' + ) + + class InvalidRetryConfigurationError(BotoCoreError): """Error when invalid retry configuration is specified""" fmt = ( @@ -469,6 +484,15 @@ class InvalidMaxRetryAttemptsError(InvalidRetryConfigurationError): 'be an integer greater than or equal to zero.' ) + +class InvalidSTSRegionalEndpointsConfigError(BotoCoreError): + """Error when invalid sts regional endpoints configuration is specified""" + fmt = ( + 'STS regional endpoints option {sts_regional_endpoints_config} is ' + 'invaild. 
Valid options are: legacy and regional' + ) + + class StubResponseError(BotoCoreError): fmt = 'Error getting response stub for operation {operation_name}: {reason}' diff --git a/ibm_botocore/handlers.py b/ibm_botocore/handlers.py index 2112244..9151bf1 100755 --- a/ibm_botocore/handlers.py +++ b/ibm_botocore/handlers.py @@ -57,6 +57,10 @@ # combination of uppercase letters, lowercase letters, numbers, periods # (.), hyphens (-), and underscores (_). VALID_BUCKET = re.compile(r'^[a-zA-Z0-9.\-_]{1,255}$') +VALID_S3_ARN = re.compile( + r'^arn:(aws).*:s3:[a-z\-0-9]+:[0-9]{12}:accesspoint[/:]' + r'[a-zA-Z0-9\-]{1,63}$' +) VERSION_ID_SUFFIX = re.compile(r'\?versionId=[^\s]+$') SERVICE_NAME_ALIASES = { @@ -216,10 +220,11 @@ def validate_bucket_name(params, **kwargs): if 'Bucket' not in params: return bucket = params['Bucket'] - if VALID_BUCKET.search(bucket) is None: + if not VALID_BUCKET.search(bucket) and not VALID_S3_ARN.search(bucket): error_msg = ( 'Invalid bucket name "%s": Bucket name must match ' - 'the regex "%s"' % (bucket, VALID_BUCKET.pattern)) + 'the regex "%s" or be an ARN matching the regex "%s"' % ( + bucket, VALID_BUCKET.pattern, VALID_S3_ARN.pattern)) raise ParamValidationError(report=error_msg) @@ -306,7 +311,7 @@ def document_copy_source_form(section, event_name, **kwargs): value_portion = param_line.get_section('member-value') value_portion.clear_text() value_portion.write("'string' or {'Bucket': 'string', " - "'Key': 'string'}") + "'Key': 'string', 'VersionId': 'string'}") elif 'request-params' in event_name: param_section = section.get_section('CopySource') type_section = param_section.get_section('param-type') @@ -315,15 +320,16 @@ def document_copy_source_form(section, event_name, **kwargs): doc_section = param_section.get_section('param-documentation') doc_section.clear_text() doc_section.write( - "The name of the source bucket, key name of the source object. 
" - "You can either " + "The name of the source bucket, key name of the source object, " + "and optional version ID of the source object. You can either " "provide this value as a string or a dictionary. The " "string form is {bucket}/{key} or " - "{bucket}/{key} if you want to copy a " + "{bucket}/{key}?versionId={versionId} if you want to copy a " "specific version. You can also provide this value as a " "dictionary. The dictionary format is recommended over " "the string format because it is more explicit. The dictionary " - "format is: {'Bucket': 'bucket', 'Key': 'key'}." + "format is: {'Bucket': 'bucket', 'Key': 'key', 'VersionId': 'id'}." + " Note that the VersionId key is optional and may be omitted." ) @@ -334,10 +340,10 @@ def handle_copy_source_param(params, **kwargs): * CopySource provided as a string. We'll make a best effort to URL encode the key name as required. This will require - parsing the bucket from the CopySource value + parsing the bucket and version id from the CopySource value and only encoding the key. * CopySource provided as a dict. In this case we're - explicitly given the Bucket, Key so we're + explicitly given the Bucket, Key, and VersionId so we're able to encode the key and ensure this value is serialized and correctly sent to S3. @@ -855,6 +861,8 @@ def __call__(self, client, **kwargs): class HeaderToHostHoister(object): """Takes a header and moves it to the front of the hoststring. 
""" + _VALID_HOSTNAME = re.compile(r'(?!-)[a-z\d-]{1,63}(?.+)$' + ) + _BLACKLISTED_OPERATIONS = [ + 'CreateBucket' + ] + + def __init__(self, arn_parser=None): + self._arn_parser = arn_parser + if arn_parser is None: + self._arn_parser = ArnParser() + + def register(self, event_emitter): + event_emitter.register('before-parameter-build.s3', self.handle_arn) + + def handle_arn(self, params, model, context, **kwargs): + if model.name in self._BLACKLISTED_OPERATIONS: + return + arn_details = self._get_arn_details_from_bucket_param(params) + if arn_details is None: + return + if arn_details['resource_type'] == 'accesspoint': + self._store_accesspoint(params, context, arn_details) + + def _get_arn_details_from_bucket_param(self, params): + if 'Bucket' in params: + try: + arn = params['Bucket'] + arn_details = self._arn_parser.parse_arn(arn) + self._add_resource_type_and_name(arn, arn_details) + return arn_details + except InvalidArnException: + pass + return None + + def _add_resource_type_and_name(self, arn, arn_details): + match = self._ACCESSPOINT_RESOURCE_REGEX.match(arn_details['resource']) + if match: + arn_details['resource_type'] = 'accesspoint' + arn_details['resource_name'] = match.group('resource_name') + else: + raise UnsupportedS3ArnError(arn=arn) + + def _store_accesspoint(self, params, context, arn_details): + # Ideally the access-point would be stored as a parameter in the + # request where the serializer would then know how to serialize it, + # but access-points are not modeled in S3 operations so it would fail + # validation. Instead, we set the access-point to the bucket parameter + # to have some value set when serializing the request and additional + # information on the context from the arn to use in forming the + # access-point endpoint. 
+ params['Bucket'] = arn_details['resource_name'] + context['s3_accesspoint'] = { + 'name': arn_details['resource_name'], + 'account': arn_details['account'], + 'partition': arn_details['partition'], + 'region': arn_details['region'], + } + + +class S3EndpointSetter(object): + _DEFAULT_PARTITION = 'aws' + _DEFAULT_DNS_SUFFIX = 'amazonaws.com' + + def __init__(self, endpoint_resolver, region=None, + s3_config=None, endpoint_url=None, partition=None): + self._endpoint_resolver = endpoint_resolver + self._region = region + self._s3_config = s3_config + if s3_config is None: + self._s3_config = {} + self._endpoint_url = endpoint_url + self._partition = partition + if partition is None: + self._partition = self._DEFAULT_PARTITION + + def register(self, event_emitter): + event_emitter.register('before-sign.s3', self.set_endpoint) + + def set_endpoint(self, request, **kwargs): + if self._use_accesspoint_endpoint(request): + self._validate_accesspoint_supported(request) + region_name = self._resolve_region_for_accesspoint_endpoint( + request) + self._switch_to_accesspoint_endpoint(request, region_name) + return + if self._use_accelerate_endpoint: + switch_host_s3_accelerate(request=request, **kwargs) + if self._s3_addressing_handler: + self._s3_addressing_handler(request=request, **kwargs) + + def _use_accesspoint_endpoint(self, request): + return 's3_accesspoint' in request.context + + def _validate_accesspoint_supported(self, request): + if self._endpoint_url: + raise UnsupportedS3AccesspointConfigurationError( + msg=( + 'Client cannot use a custom "endpoint_url" when ' + 'specifying an access-point ARN.' + ) + ) + if self._use_accelerate_endpoint: + raise UnsupportedS3AccesspointConfigurationError( + msg=( + 'Client does not support s3 accelerate configuration ' + 'when and access-point ARN is specified.' 
+ ) + ) + request_partion = request.context['s3_accesspoint']['partition'] + if request_partion != self._partition: + raise UnsupportedS3AccesspointConfigurationError( + msg=( + 'Client is configured for "%s" partition, but access-point' + ' ARN provided is for "%s" partition. The client and ' + ' access-point partition must be the same.' % ( + self._partition, request_partion) + ) + ) + + def _resolve_region_for_accesspoint_endpoint(self, request): + if self._s3_config.get('use_arn_region', True): + accesspoint_region = request.context['s3_accesspoint']['region'] + # If we are using the region from the access point, + # we will also want to make sure that we set it as the + # signing region as well + self._override_signing_region(request, accesspoint_region) + return accesspoint_region + return self._region + + def _switch_to_accesspoint_endpoint(self, request, region_name): + original_components = urlsplit(request.url) + accesspoint_endpoint = urlunsplit(( + original_components.scheme, + self._get_accesspoint_netloc(request.context, region_name), + self._get_accesspoint_path( + original_components.path, request.context), + original_components.query, + '' + )) + logger.debug( + 'Updating URI from %s to %s' % (request.url, accesspoint_endpoint)) + request.url = accesspoint_endpoint + + def _get_accesspoint_netloc(self, request_context, region_name): + s3_accesspoint = request_context['s3_accesspoint'] + accesspoint_netloc_components = [ + '%s-%s' % (s3_accesspoint['name'], s3_accesspoint['account']), + 's3-accesspoint' + ] + if self._s3_config.get('use_dualstack_endpoint'): + accesspoint_netloc_components.append('dualstack') + accesspoint_netloc_components.extend( + [ + region_name, + self._get_dns_suffix(region_name) + ] + ) + return '.'.join(accesspoint_netloc_components) + + def _get_accesspoint_path(self, original_path, request_context): + # The Bucket parameter was substituted with the access-point name as + # some value was required in serializing the bucket 
name. Now that + # we are making the request directly to the access point, we will + # want to remove that access-point name from the path. + name = request_context['s3_accesspoint']['name'] + # All S3 operations require at least a / in their path. + return original_path.replace('/' + name, '', 1) or '/' + + def _get_dns_suffix(self, region_name): + resolved = self._endpoint_resolver.construct_endpoint( + 's3', region_name) + dns_suffix = self._DEFAULT_DNS_SUFFIX + if resolved and 'dnsSuffix' in resolved: + dns_suffix = resolved['dnsSuffix'] + return dns_suffix + + def _override_signing_region(self, request, region_name): + signing_context = { + 'region': region_name, + } + # S3SigV4Auth will use the context['signing']['region'] value to + # sign with if present. This is used by the Bucket redirector + # as well but we should be fine because the redirector is never + # used in combination with the accesspoint setting logic. + request.context['signing'] = signing_context + + + @CachedProperty + def _use_accelerate_endpoint(self): + # Enable accelerate if the configuration is set to to true or the + # endpoint being used matches one of the accelerate endpoints. + + # Accelerate has been explicitly configured. + if self._s3_config.get('use_accelerate_endpoint'): + return True + + # Accelerate mode is turned on automatically if an endpoint url is + # provided that matches the accelerate scheme. + if self._endpoint_url is None: + return False + + # Accelerate is only valid for Amazon endpoints. + netloc = urlsplit(self._endpoint_url).netloc + if not netloc.endswith('amazonaws.com'): + return False + + # The first part of the url should always be s3-accelerate. + parts = netloc.split('.') + if parts[0] != 's3-accelerate': + return False + + # Url parts between 's3-accelerate' and 'amazonaws.com' which + # represent different url features. + feature_parts = parts[1:-2] + + # There should be no duplicate url parts. 
+ if len(feature_parts) != len(set(feature_parts)): + return False + + # Remaining parts must all be in the whitelist. + return all(p in S3_ACCELERATE_WHITELIST for p in feature_parts) + + @CachedProperty + def _addressing_style(self): + # Use virtual host style addressing if accelerate is enabled or if + # the given endpoint url is an accelerate endpoint. + if self._use_accelerate_endpoint: + return 'virtual' + + # If a particular addressing style is configured, use it. + configured_addressing_style = self._s3_config.get('addressing_style') + if configured_addressing_style: + return configured_addressing_style + + @CachedProperty + def _s3_addressing_handler(self): + # If virtual host style was configured, use it regardless of whether + # or not the bucket looks dns compatible. + if self._addressing_style == 'virtual': + logger.debug("Using S3 virtual host style addressing.") + return switch_to_virtual_host_style + + # If path style is configured, no additional steps are needed. If + # endpoint_url was specified, don't default to virtual. We could + # potentially default provided endpoint urls to virtual hosted + # style, but for now it is avoided. + if self._addressing_style == 'path' or self._endpoint_url is not None: + logger.debug("Using S3 path style addressing.") + return None + + logger.debug("Defaulting to S3 virtual host style addressing with " + "path style addressing fallback.") + + # By default, try to use virtual style with path fallback. 
+ return fix_s3_host + class ContainerMetadataFetcher(object): diff --git a/tests/__init__.py b/tests/__init__.py index e942810..c0666ef 100755 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -45,6 +45,7 @@ from ibm_botocore.compat import parse_qs from ibm_botocore import utils from ibm_botocore import credentials +from ibm_botocore.stub import Stubber _LOADER = ibm_botocore.loaders.Loader() @@ -377,11 +378,11 @@ def stream(self, **kwargs): contents = self.read() -class ClientHTTPStubber(object): - def __init__(self, client, strict=True): +class BaseHTTPStubber(object): + def __init__(self, obj_with_event_emitter, strict=True): self.reset() self._strict = strict - self._client = client + self._obj_with_event_emitter = obj_with_event_emitter def reset(self): self.requests = [] @@ -396,11 +397,15 @@ def add_response(self, url='https://example.com', status=200, headers=None, response = AWSResponse(url, status, headers, raw) self.responses.append(response) + @property + def _events(self): + raise NotImplementedError('_events') + def start(self): - self._client.meta.events.register('before-send', self) + self._events.register('before-send', self) def stop(self): - self._client.meta.events.unregister('before-send', self) + self._events.unregister('before-send', self) def __enter__(self): self.start() @@ -421,3 +426,109 @@ def __call__(self, request, **kwargs): raise HTTPStubberException('Insufficient responses') else: return None + + +class ClientHTTPStubber(BaseHTTPStubber): + @property + def _events(self): + return self._obj_with_event_emitter.meta.events + + +class SessionHTTPStubber(BaseHTTPStubber): + @property + def _events(self): + return self._obj_with_event_emitter.get_component('event_emitter') + + +class ConsistencyWaiterException(Exception): + pass + + +class ConsistencyWaiter(object): + """ + A waiter class for some check to reach a consistent state. 
+ + :type min_successes: int + :param min_successes: The minimum number of successful check calls to + treat the check as stable. Default of 1 success. + + :type max_attempts: int + :param min_successes: The maximum number of times to attempt calling + the check. Default of 20 attempts. + + :type delay: int + :param delay: The number of seconds to delay the next API call after a + failed check call. Default of 5 seconds. + """ + def __init__(self, min_successes=1, max_attempts=20, delay=5, + delay_initial_poll=False): + self.min_successes = min_successes + self.max_attempts = max_attempts + self.delay = delay + self.delay_initial_poll = delay_initial_poll + + def wait(self, check, *args, **kwargs): + """ + Wait until the check succeeds the configured number of times + + :type check: callable + :param check: A callable that returns True or False to indicate + if the check succeeded or failed. + + :type args: list + :param args: Any ordered arguments to be passed to the check. + + :type kwargs: dict + :param kwargs: Any keyword arguments to be passed to the check. 
+ """ + attempts = 0 + successes = 0 + if self.delay_initial_poll: + time.sleep(self.delay) + while attempts < self.max_attempts: + attempts += 1 + if check(*args, **kwargs): + successes += 1 + if successes >= self.min_successes: + return + else: + time.sleep(self.delay) + fail_msg = self._fail_message(attempts, successes) + raise ConsistencyWaiterException(fail_msg) + + def _fail_message(self, attempts, successes): + format_args = (attempts, successes) + return 'Failed after %s attempts, only had %s successes' % format_args + + +class StubbedSession(ibm_botocore.session.Session): + def __init__(self, *args, **kwargs): + super(StubbedSession, self).__init__(*args, **kwargs) + self._cached_clients = {} + self._client_stubs = {} + + def create_client(self, service_name, *args, **kwargs): + if service_name not in self._cached_clients: + client = self._create_stubbed_client(service_name, *args, **kwargs) + self._cached_clients[service_name] = client + return self._cached_clients[service_name] + + def _create_stubbed_client(self, service_name, *args, **kwargs): + client = super(StubbedSession, self).create_client( + service_name, *args, **kwargs) + stubber = Stubber(client) + self._client_stubs[service_name] = stubber + return client + + def stub(self, service_name): + if service_name not in self._client_stubs: + self.create_client(service_name) + return self._client_stubs[service_name] + + def activate_stubs(self): + for stub in self._client_stubs.values(): + stub.activate() + + def verify_stubs(self): + for stub in self._client_stubs.values(): + stub.assert_no_pending_responses() diff --git a/tests/functional/test_waiter_config.py b/tests/functional/test_waiter_config.py index c313f76..397fb63 100644 --- a/tests/functional/test_waiter_config.py +++ b/tests/functional/test_waiter_config.py @@ -156,7 +156,7 @@ def _validate_acceptor(acceptor, op_model, waiter_name): # JMESPath expression against the output. 
We'll then # check a few things about this returned search result. search_result = _search_jmespath_expression(expression, op_model) - if not search_result: + if search_result is None: raise AssertionError("JMESPath expression did not match " "anything for waiter '%s': %s" % (waiter_name, expression)) diff --git a/tests/integration/test_client.py b/tests/integration/test_client.py index 3331666..d6d36d5 100644 --- a/tests/integration/test_client.py +++ b/tests/integration/test_client.py @@ -20,63 +20,6 @@ from ibm_botocore.exceptions import EndpointConnectionError -class TestBucketWithVersions(unittest.TestCase): - def setUp(self): - self.session = ibm_botocore.session.get_session() - self.client = self.session.create_client('s3', region_name='us-west-2') - self.bucket_name = 'botocoretest%s' % random_chars(50) - - def extract_version_ids(self, versions): - version_ids = [] - for marker in versions['DeleteMarkers']: - version_ids.append(marker['VersionId']) - for version in versions['Versions']: - version_ids.append(version['VersionId']) - return version_ids - - def test_create_versioned_bucket(self): - # Verifies we can: - # 1. Create a bucket - # 2. Enable versioning - # 3. 
Put an Object - self.client.create_bucket( - Bucket=self.bucket_name, - CreateBucketConfiguration={ - 'LocationConstraint': 'us-west-2' - } - ) - self.addCleanup(self.client.delete_bucket, Bucket=self.bucket_name) - - self.client.put_bucket_versioning( - Bucket=self.bucket_name, - VersioningConfiguration={"Status": "Enabled"}) - response = self.client.put_object( - Bucket=self.bucket_name, Key='testkey', Body='bytes body') - self.addCleanup(self.client.delete_object, - Bucket=self.bucket_name, - Key='testkey', - VersionId=response['VersionId']) - - response = self.client.get_object( - Bucket=self.bucket_name, Key='testkey') - self.assertEqual(response['Body'].read(), b'bytes body') - - response = self.client.delete_object(Bucket=self.bucket_name, - Key='testkey') - # This cleanup step removes the DeleteMarker that's created - # from the delete_object call above. - self.addCleanup(self.client.delete_object, - Bucket=self.bucket_name, - Key='testkey', - VersionId=response['VersionId']) - # Object does not exist anymore. 
- with self.assertRaises(ClientError): - self.client.get_object(Bucket=self.bucket_name, Key='testkey') - versions = self.client.list_object_versions(Bucket=self.bucket_name) - version_ids = self.extract_version_ids(versions) - self.assertEqual(len(version_ids), 2) - - # This is really a combination of testing the debug logging mechanism # as well as the response wire log, which theoretically could be # implemented in any number of modules, which makes it hard to pick diff --git a/tests/integration/test_client_http.py b/tests/integration/test_client_http.py index bd6392d..d5fed2c 100644 --- a/tests/integration/test_client_http.py +++ b/tests/integration/test_client_http.py @@ -12,7 +12,7 @@ ConnectTimeoutError, ReadTimeoutError, EndpointConnectionError, ConnectionClosedError, ) -from requests import exceptions as requests_exceptions +from ibm_botocore.vendored.requests import exceptions as requests_exceptions class TestClientHTTPBehavior(unittest.TestCase): @@ -20,6 +20,9 @@ def setUp(self): self.port = unused_port() self.localhost = 'http://localhost:%s/' % self.port self.session = ibm_botocore.session.get_session() + # We need to set fake credentials to ensure credentials aren't searched + # for which might make additional API calls (assume role, etc). 
+ self.session.set_credentials('fakeakid', 'fakesecret') @unittest.skip('Test has suddenly become extremely flakey.') def test_can_proxy_https_request_with_auth(self): diff --git a/tests/integration/test_ec2.py b/tests/integration/test_ec2.py index a69b1cd..b2606c3 100644 --- a/tests/integration/test_ec2.py +++ b/tests/integration/test_ec2.py @@ -30,7 +30,8 @@ def test_can_make_request(self): result = self.client.describe_availability_zones() zones = list( sorted(a['ZoneName'] for a in result['AvailabilityZones'])) - self.assertEqual(zones, ['us-west-2a', 'us-west-2b', 'us-west-2c']) + self.assertTrue( + set(['us-west-2a', 'us-west-2b', 'us-west-2c']).issubset(zones)) def test_get_console_output_handles_error(self): # Want to ensure the underlying ClientError is propogated @@ -63,9 +64,9 @@ def test_can_paginate_with_page_size(self): self.assertEqual(len(results), 3) for parsed in results: reserved_inst_offer = parsed['ReservedInstancesOfferings'] - # There should only be one reserved instance offering on each - # page. - self.assertEqual(len(reserved_inst_offer), 1) + # There should be no more than one reserved instance + # offering on each page. + self.assertLessEqual(len(reserved_inst_offer), 1) def test_can_fall_back_to_old_starting_token(self): # Using an operation that we know will paginate. @@ -81,65 +82,5 @@ def test_can_fall_back_to_old_starting_token(self): self.fail("Old style paginator failed.") -@attr('slow') -class TestCopySnapshotCustomization(unittest.TestCase): - def setUp(self): - self.session = ibm_botocore.session.get_session() - # However, all the test fixture setup/cleanup can use - # the client interface. 
- self.client = self.session.create_client('ec2', 'us-west-2') - self.client_us_east_1 = self.session.create_client( - 'ec2', 'us-east-1') - - def create_volume(self, encrypted=False): - available_zones = self.client.describe_availability_zones() - first_zone = available_zones['AvailabilityZones'][0]['ZoneName'] - response = self.client.create_volume( - Size=1, AvailabilityZone=first_zone, Encrypted=encrypted) - volume_id = response['VolumeId'] - self.addCleanup(self.client.delete_volume, VolumeId=volume_id) - self.client.get_waiter('volume_available').wait(VolumeIds=[volume_id]) - return volume_id - - def create_snapshot(self, volume_id): - response = self.client.create_snapshot(VolumeId=volume_id) - snapshot_id = response['SnapshotId'] - self.client.get_waiter('snapshot_completed').wait( - SnapshotIds=[snapshot_id]) - self.addCleanup(self.client.delete_snapshot, SnapshotId=snapshot_id) - return snapshot_id - - def cleanup_copied_snapshot(self, snapshot_id): - dest_client = self.session.create_client('ec2', 'us-east-1') - self.addCleanup(dest_client.delete_snapshot, - SnapshotId=snapshot_id) - dest_client.get_waiter('snapshot_completed').wait( - SnapshotIds=[snapshot_id]) - - def test_can_copy_snapshot(self): - volume_id = self.create_volume() - snapshot_id = self.create_snapshot(volume_id) - - result = self.client_us_east_1.copy_snapshot( - SourceRegion='us-west-2', - SourceSnapshotId=snapshot_id) - self.assertIn('SnapshotId', result) - - # Cleanup code. We can wait for the snapshot to be complete - # and then we can delete the snapshot. - self.cleanup_copied_snapshot(result['SnapshotId']) - - def test_can_copy_encrypted_snapshot(self): - # Note that we're creating an encrypted volume here. 
- volume_id = self.create_volume(encrypted=True) - snapshot_id = self.create_snapshot(volume_id) - - result = self.client_us_east_1.copy_snapshot( - SourceRegion='us-west-2', - SourceSnapshotId=snapshot_id) - self.assertIn('SnapshotId', result) - self.cleanup_copied_snapshot(result['SnapshotId']) - - if __name__ == '__main__': unittest.main() diff --git a/tests/integration/test_elastictranscoder.py b/tests/integration/test_elastictranscoder.py index be1f25b..e9b7850 100644 --- a/tests/integration/test_elastictranscoder.py +++ b/tests/integration/test_elastictranscoder.py @@ -39,6 +39,8 @@ def setUp(self): def create_bucket(self): bucket_name = 'ets-bucket-1-%s' % random_chars(50) self.s3_client.create_bucket(Bucket=bucket_name) + waiter = self.s3_client.get_waiter('bucket_exists') + waiter.wait(Bucket=bucket_name) self.addCleanup( self.s3_client.delete_bucket, Bucket=bucket_name) return bucket_name diff --git a/tests/integration/test_s3.py b/tests/integration/test_s3.py index 2affa3a..0c1bbe3 100644 --- a/tests/integration/test_s3.py +++ b/tests/integration/test_s3.py @@ -11,7 +11,10 @@ # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. 
-from tests import unittest, temporary_file, random_chars, ClientHTTPStubber +from tests import ( + unittest, temporary_file, random_chars, ClientHTTPStubber, + ConsistencyWaiter, +) import os import time from collections import defaultdict @@ -24,20 +27,20 @@ from contextlib import closing from nose.plugins.attrib import attr +import urllib3 from ibm_botocore.endpoint import Endpoint from ibm_botocore.exceptions import ConnectionClosedError -from ibm_botocore.compat import six, zip_longest +from ibm_botocore.compat import six, zip_longest, OrderedDict import ibm_botocore.session import ibm_botocore.auth import ibm_botocore.credentials -import requests from ibm_botocore.config import Config -from ibm_botocore.exceptions import ClientError +from ibm_botocore.exceptions import ClientError, WaiterError def random_bucketname(): - return 'botocoretest-' + random_chars(10) + return 'botocoretest-' + random_chars(50) LOG = logging.getLogger('ibm_botocore.tests.integration') @@ -45,6 +48,24 @@ def random_bucketname(): _DEFAULT_REGION = 'us-west-2' +def http_get(url): + http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED') + response = http.request('GET', url) + return response + + +def http_post(url, data, files): + http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED') + merged_data = OrderedDict() + merged_data.update(data) + merged_data.update(files) + response = http.request( + 'POST', url, + fields=merged_data, + ) + return response + + def setup_module(): s3 = ibm_botocore.session.get_session().create_client('s3') waiter = s3.get_waiter('bucket_exists') @@ -67,6 +88,9 @@ def setup_module(): def clear_out_bucket(bucket, region, delete_bucket=False): s3 = ibm_botocore.session.get_session().create_client( 's3', region_name=region) + # Ensure the bucket exists before attempting to wipe it out + exists_waiter = s3.get_waiter('bucket_exists') + exists_waiter.wait(Bucket=bucket) page = s3.get_paginator('list_objects') # Use pages paired with batch delete_objects(). 
for page in page.paginate(Bucket=bucket): @@ -74,17 +98,23 @@ def clear_out_bucket(bucket, region, delete_bucket=False): if keys: s3.delete_objects(Bucket=bucket, Delete={'Objects': keys}) if delete_bucket: - try: - s3.delete_bucket(Bucket=bucket) - except Exception as e: - # We can sometimes get exceptions when trying to - # delete a bucket. We'll let the waiter make - # the final call as to whether the bucket was able - # to be deleted. - LOG.debug("delete_bucket() raised an exception: %s", - e, exc_info=True) - waiter = s3.get_waiter('bucket_not_exists') - waiter.wait(Bucket=bucket) + for _ in range(5): + try: + s3.delete_bucket(Bucket=bucket) + break + except s3.exceptions.NoSuchBucket: + exists_waiter.wait(Bucket=bucket) + except Exception as e: + # We can sometimes get exceptions when trying to + # delete a bucket. We'll let the waiter make + # the final call as to whether the bucket was able + # to be deleted. + LOG.debug("delete_bucket() raised an exception: %s", + e, exc_info=True) + not_exists_waiter = s3.get_waiter('bucket_not_exists') + not_exists_waiter.wait(Bucket=bucket) + except WaiterError: + continue def teardown_module(): @@ -92,6 +122,9 @@ def teardown_module(): class BaseS3ClientTest(unittest.TestCase): + + DEFAULT_DELAY = 5 + def setUp(self): self.bucket_name = _SHARED_BUCKET self.region = _DEFAULT_REGION @@ -117,14 +150,24 @@ def create_bucket(self, region_name, bucket_name=None, client=None): response = bucket_client.create_bucket(**bucket_kwargs) self.assert_status_code(response, 200) waiter = bucket_client.get_waiter('bucket_exists') - waiter.wait(Bucket=bucket_name) + consistency_waiter = ConsistencyWaiter( + min_successes=3, delay=self.DEFAULT_DELAY, + delay_initial_poll=True) + consistency_waiter.wait( + lambda: waiter.wait(Bucket=bucket_name) is None + ) self.addCleanup(clear_out_bucket, bucket_name, region_name, True) return bucket_name - def create_object(self, key_name, body='foo'): - self.client.put_object( - 
Bucket=self.bucket_name, Key=key_name, - Body=body) + def create_object(self, key_name, body='foo', num_attempts=3): + for _ in range(num_attempts): + try: + self.client.put_object( + Bucket=self.bucket_name, Key=key_name, + Body=body) + break + except self.client.exceptions.NoSuchBucket: + time.sleep(self.DEFAULT_DELAY) self.wait_until_key_exists(self.bucket_name, key_name) def make_tempdir(self): @@ -154,6 +197,18 @@ def _wait_for_key(self, bucket_name, key_name, extra_params=None, for _ in range(min_successes): waiter.wait(**params) + def _check_bucket_versioning(self, bucket, enabled=True): + client = self.session.create_client('s3', region_name=self.region) + response = client.get_bucket_versioning(Bucket=bucket) + status = response.get('Status') + return status == 'Enabled' if enabled else status != 'Enabled' + + def wait_until_versioning_enabled(self, bucket, min_successes=3): + waiter = ConsistencyWaiter( + min_successes=min_successes, + delay=self.DEFAULT_DELAY, delay_initial_poll=True) + waiter.wait(self._check_bucket_versioning, bucket) + class TestS3BaseWithBucket(BaseS3ClientTest): def setUp(self): @@ -454,6 +509,21 @@ def test_unicode_system_character_with_list_v2(self): self.assertEqual(len(parsed['Contents']), 1) self.assertEqual(parsed['Contents'][0]['Key'], 'foo%08') + def test_unicode_system_character_with_list_object_versions(self): + # Verify we can use a unicode system character which would normally + # break the xml parser + key_name = 'foo\x03' + self.create_object(key_name) + self.addCleanup(self.delete_object, key_name, self.bucket_name) + parsed = self.client.list_object_versions(Bucket=self.bucket_name) + self.assertEqual(len(parsed['Versions']), 1) + self.assertEqual(parsed['Versions'][0]['Key'], key_name) + + parsed = self.client.list_object_versions(Bucket=self.bucket_name, + EncodingType='url') + self.assertEqual(len(parsed['Versions']), 1) + self.assertEqual(parsed['Versions'][0]['Key'], 'foo%03') + def test_thread_safe_auth(self): 
self.auth_paths = [] emitter = self.session.get_component('event_emitter') @@ -600,7 +670,7 @@ def test_presign_sigv2(self): "Host was suppose to use DNS style, instead " "got: %s" % presigned_url) # Try to retrieve the object using the presigned url. - self.assertEqual(requests.get(presigned_url).content, b'foo') + self.assertEqual(http_get(presigned_url).data, b'foo') def test_presign_with_existing_query_string_values(self): content_disposition = 'attachment; filename=foo.txt;' @@ -608,10 +678,10 @@ def test_presign_with_existing_query_string_values(self): 'get_object', Params={ 'Bucket': self.bucket_name, 'Key': self.key, 'ResponseContentDisposition': content_disposition}) - response = requests.get(presigned_url) + response = http_get(presigned_url) self.assertEqual(response.headers['Content-Disposition'], content_disposition) - self.assertEqual(response.content, b'foo') + self.assertEqual(response.data, b'foo') def test_presign_sigv4(self): self.client_config.signature_version = 's3v4' @@ -626,7 +696,7 @@ def test_presign_sigv4(self): "Host was suppose to be the us-east-1 endpoint, instead " "got: %s" % presigned_url) # Try to retrieve the object using the presigned url. - self.assertEqual(requests.get(presigned_url).content, b'foo') + self.assertEqual(http_get(presigned_url).data, b'foo') def test_presign_post_sigv2(self): @@ -656,9 +726,9 @@ def test_presign_post_sigv2(self): "got: %s" % post_args['url']) # Try to retrieve the object using the presigned url. 
- r = requests.post( - post_args['url'], data=post_args['fields'], files=files) - self.assertEqual(r.status_code, 204) + r = http_post(post_args['url'], data=post_args['fields'], + files=files) + self.assertEqual(r.status, 204) def test_presign_post_sigv4(self): self.client_config.signature_version = 's3v4' @@ -690,9 +760,9 @@ def test_presign_post_sigv4(self): "Host was suppose to use us-east-1 endpoint, instead " "got: %s" % post_args['url']) - r = requests.post( - post_args['url'], data=post_args['fields'], files=files) - self.assertEqual(r.status_code, 204) + r = http_post(post_args['url'], data=post_args['fields'], + files=files) + self.assertEqual(r.status, 204) class TestS3PresignNonUsStandard(BaseS3PresignTest): @@ -715,7 +785,7 @@ def test_presign_sigv2(self): "Host was suppose to use DNS style, instead " "got: %s" % presigned_url) # Try to retrieve the object using the presigned url. - self.assertEqual(requests.get(presigned_url).content, b'foo') + self.assertEqual(http_get(presigned_url).data, b'foo') def test_presign_sigv4(self): # For a newly created bucket, you can't use virtualhosted @@ -738,7 +808,7 @@ def test_presign_sigv4(self): "Host was suppose to be the us-west-2 endpoint, instead " "got: %s" % presigned_url) # Try to retrieve the object using the presigned url. - self.assertEqual(requests.get(presigned_url).content, b'foo') + self.assertEqual(http_get(presigned_url).data, b'foo') def test_presign_post_sigv2(self): # Create some of the various supported conditions. 
@@ -765,9 +835,9 @@ def test_presign_post_sigv2(self): "Host was suppose to use DNS style, instead " "got: %s" % post_args['url']) - r = requests.post( - post_args['url'], data=post_args['fields'], files=files) - self.assertEqual(r.status_code, 204) + r = http_post(post_args['url'], data=post_args['fields'], + files=files) + self.assertEqual(r.status, 204) def test_presign_post_sigv4(self): self.client_config.signature_version = 's3v4' @@ -798,9 +868,9 @@ def test_presign_post_sigv4(self): "Host was suppose to use DNS style, instead " "got: %s" % post_args['url']) - r = requests.post( - post_args['url'], data=post_args['fields'], files=files) - self.assertEqual(r.status_code, 204) + r = http_post(post_args['url'], data=post_args['fields'], + files=files) + self.assertEqual(r.status, 204) class TestCreateBucketInOtherRegion(TestS3BaseWithBucket): @@ -1203,10 +1273,63 @@ def test_redirects_head_object(self): key = 'foo' self.bucket_client.put_object( Bucket=self.bucket_name, Key=key, Body='bar') - + self.wait_until_key_exists(self.bucket_name, key) try: response = self.client.head_object( Bucket=self.bucket_name, Key=key) self.assertEqual(response.get('ContentLength'), len(key)) except ClientError as e: self.fail("S3 Client failed to redirect Head Object: %s" % e) + + +class TestBucketWithVersions(BaseS3ClientTest): + def extract_version_ids(self, versions): + version_ids = [] + for marker in versions['DeleteMarkers']: + version_ids.append(marker['VersionId']) + for version in versions['Versions']: + version_ids.append(version['VersionId']) + return version_ids + + def test_create_versioned_bucket(self): + # Verifies we can: + # 1. Create a bucket + # 2. Enable versioning + # 3. 
Put an Object + bucket = self.create_bucket(self.region) + + self.client.put_bucket_versioning( + Bucket=bucket, + VersioningConfiguration={"Status": "Enabled"}, + ) + self.wait_until_versioning_enabled(bucket) + + key = 'testkey' + body = b'bytes body' + response = self.client.put_object(Bucket=bucket, Key=key, Body=body) + self.addCleanup( + self.client.delete_object, + Bucket=bucket, + Key=key, + VersionId=response['VersionId'] + ) + self.wait_until_key_exists(bucket, key) + + response = self.client.get_object(Bucket=bucket, Key=key) + self.assertEqual(response['Body'].read(), body) + + response = self.client.delete_object(Bucket=bucket, Key=key) + # This cleanup step removes the DeleteMarker that's created + # from the delete_object call above. + self.addCleanup( + self.client.delete_object, + Bucket=bucket, + Key=key, + VersionId=response['VersionId'] + ) + # Object does not exist anymore. + with self.assertRaises(ClientError): + self.client.get_object(Bucket=bucket, Key=key) + versions = self.client.list_object_versions(Bucket=bucket) + version_ids = self.extract_version_ids(versions) + self.assertEqual(len(version_ids), 2) diff --git a/tests/integration/test_smoke.py b/tests/integration/test_smoke.py index fdc9eaf..b7aa7db 100644 --- a/tests/integration/test_smoke.py +++ b/tests/integration/test_smoke.py @@ -14,6 +14,7 @@ import mock from pprint import pformat import warnings +import logging from nose.tools import assert_equal, assert_true from tests import ClientHTTPStubber @@ -109,6 +110,7 @@ 'workspaces': {'DescribeWorkspaces': {}}, } + # Same thing as the SMOKE_TESTS hash above, except these verify # that we get an error response back from the server because # we've sent invalid params. 
@@ -165,11 +167,6 @@ 'gamelift': {'DescribeBuild': {'BuildId': 'fake-build-id'}}, 'glacier': {'ListVaults': {'accountId': 'fake'}}, 'iam': {'GetUser': {'UserName': 'fake'}}, - 'importexport': {'CreateJob': { - 'JobType': 'Import', - 'ValidateOnly': False, - 'Manifest': 'fake', - }}, 'kinesis': {'DescribeStream': {'StreamName': 'fake'}}, 'kms': {'GetKeyPolicy': {'KeyId': 'fake', 'PolicyName': 'fake'}}, 'lambda': {'Invoke': {'FunctionName': 'fake'}}, @@ -198,7 +195,7 @@ }}, 'swf': {'DescribeDomain': {'name': 'fake'}}, 'waf': {'GetWebACL': {'WebACLId': 'fake'}}, - 'workspaces': {'DescribeWorkspaces': {'DirectoryId': 'fake'}}, + 'workspaces': {'DescribeWorkspaces': {'DirectoryId': 'fake-directory-id'}}, } REGION = 'us-east-1' @@ -207,6 +204,8 @@ 'efs': 'us-west-2', 'inspector': 'us-west-2', } +MAX_RETRIES = 8 +logger = logging.getLogger(__name__) def _get_client(session, service): @@ -214,7 +213,24 @@ def _get_client(session, service): region_name = os.environ['AWS_SMOKE_TEST_REGION'] else: region_name = REGION_OVERRIDES.get(service, REGION) - return session.create_client(service, region_name=region_name) + client = session.create_client(service, region_name=region_name) + client.meta.events.register_first('needs-retry.*.*', retry_handler) + return client + + +def retry_handler(response, attempts, **kwargs): + if response is not None: + _, parsed = response + code = parsed.get('Error', {}).get('Code') + # Catch ThrottleException, Throttling. + is_throttle_error = code is not None and 'throttl' in code.lower() + if is_throttle_error and attempts <= MAX_RETRIES: + # We want the exponential behavior with a fixed 10 second + # minimum, e.g. 11, 12, 14, 18, 26. With a max retries of 8, + # this is about 7-8 minutes total we'll retry. 
+ retry_delay = (2 ** (attempts - 1)) + 10 + logger.debug("Using custom retry delay of: %s", retry_delay) + return retry_delay def _list_services(dict_entries): diff --git a/tests/unit/auth/test_signers.py b/tests/unit/auth/test_signers.py index 25a3244..1ff1162 100644 --- a/tests/unit/auth/test_signers.py +++ b/tests/unit/auth/test_signers.py @@ -360,20 +360,6 @@ def test_blacklist_trace_id(self): def test_blacklist_headers(self): self._test_blacklist_header('user-agent', 'botocore/1.4.11') - def test_context_sets_signing_region(self): - original_signing_region = 'eu-central-1' - new_signing_region = 'us-west-2' - self.auth.add_auth(self.request) - auth = self.request.headers['Authorization'] - self.assertIn(original_signing_region, auth) - self.assertNotIn(new_signing_region, auth) - - self.request.context = {'signing': {'region': new_signing_region}} - self.auth.add_auth(self.request) - auth = self.request.headers['Authorization'] - self.assertIn(new_signing_region, auth) - self.assertNotIn(original_signing_region, auth) - def test_uses_sha256_if_config_value_is_true(self): self.client_config.s3['payload_signing_enabled'] = True self.auth.add_auth(self.request) diff --git a/tests/unit/test_args.py b/tests/unit/test_args.py index 84b310e..57b12a1 100644 --- a/tests/unit/test_args.py +++ b/tests/unit/test_args.py @@ -18,8 +18,10 @@ import mock from ibm_botocore import args +from ibm_botocore import exceptions from ibm_botocore.client import ClientEndpointBridge from ibm_botocore.config import Config +from ibm_botocore.configprovider import ConfigValueStore from ibm_botocore.hooks import HierarchicalEmitter from ibm_botocore.model import ServiceModel @@ -27,26 +29,44 @@ class TestCreateClientArgs(unittest.TestCase): def setUp(self): self.event_emitter = mock.Mock(HierarchicalEmitter) + self.config_store = ConfigValueStore() self.args_create = args.ClientArgsCreator( - self.event_emitter, None, None, None, None) + self.event_emitter, None, None, None, None, 
self.config_store) + self.service_name = 's3' self.region = 'us-west-2' self.endpoint_url = 'https://ec2/' - self.service_model = mock.Mock(ServiceModel) - self.service_model.metadata = { - 'serviceFullName': 'MyService', - 'protocol': 'query' - } - self.service_model.operation_names = [] + self.service_model = self._get_service_model() self.bridge = mock.Mock(ClientEndpointBridge) - self.bridge.resolve.return_value = { - 'region_name': self.region, 'signature_version': 'v4', - 'endpoint_url': self.endpoint_url, - 'signing_name': 'ec2', 'signing_region': self.region, - 'metadata': {}} + self._set_endpoint_bridge_resolve() self.default_socket_options = [ (socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) ] + def _get_service_model(self, service_name=None): + if service_name is None: + service_name = self.service_name + service_model = mock.Mock(ServiceModel) + service_model.service_name = service_name + service_model.endpoint_prefix = service_name + service_model.metadata = { + 'serviceFullName': 'MyService', + 'protocol': 'query' + } + service_model.operation_names = [] + return service_model + + def _set_endpoint_bridge_resolve(self, **override_kwargs): + ret_val = { + 'region_name': self.region, + 'signature_version': 'v4', + 'endpoint_url': self.endpoint_url, + 'signing_name': self.service_name, + 'signing_region': self.region, + 'metadata': {} + } + ret_val.update(**override_kwargs) + self.bridge.resolve.return_value = ret_val + def call_get_client_args(self, **override_kwargs): call_kwargs = { 'service_model': self.service_model, @@ -80,87 +100,35 @@ def assert_create_endpoint_call(self, mock_endpoint, **override_kwargs): ) def test_compute_s3_configuration(self): - scoped_config = {} - client_config = None - self.assertIsNone( - self.args_create.compute_s3_config( - scoped_config, client_config)) - - def test_compute_s3_config_only_scoped_config(self): - scoped_config = { - 's3': {'use_accelerate_endpoint': True}, - } - client_config = None - self.assertEqual( - 
self.args_create.compute_s3_config(scoped_config, client_config), - {'use_accelerate_endpoint': True} - ) - - def test_client_s3_accelerate_from_varying_forms_of_true(self): - scoped_config= {'s3': {'use_accelerate_endpoint': 'True'}} - client_config = None + self.assertIsNone(self.args_create.compute_s3_config(None)) + def test_compute_s3_config_only_config_store(self): + self.config_store.set_config_variable( + 's3', {'use_accelerate_endpoint': True}) self.assertEqual( - self.args_create.compute_s3_config( - {'s3': {'use_accelerate_endpoint': 'True'}}, - client_config=None), - {'use_accelerate_endpoint': True} - ) - self.assertEqual( - self.args_create.compute_s3_config( - {'s3': {'use_accelerate_endpoint': 'true'}}, - client_config=None), - {'use_accelerate_endpoint': True} - ) - self.assertEqual( - self.args_create.compute_s3_config( - {'s3': {'use_accelerate_endpoint': True}}, - client_config=None), + self.args_create.compute_s3_config(None), {'use_accelerate_endpoint': True} ) def test_client_s3_accelerate_from_client_config(self): self.assertEqual( self.args_create.compute_s3_config( - scoped_config=None, client_config=Config(s3={'use_accelerate_endpoint': True}) ), {'use_accelerate_endpoint': True} ) - def test_client_s3_accelerate_client_config_overrides_scoped(self): + def test_client_s3_accelerate_client_config_overrides_config_store(self): + self.config_store.set_config_variable( + 's3', {'use_accelerate_endpoint': False}) self.assertEqual( self.args_create.compute_s3_config( - scoped_config={'s3': {'use_accelerate_endpoint': False}}, client_config=Config(s3={'use_accelerate_endpoint': True}) ), # client_config beats scoped_config {'use_accelerate_endpoint': True} ) - def test_client_s3_dualstack_handles_varying_forms_of_true(self): - scoped_config= {'s3': {'use_dualstack_endpoint': 'True'}} - client_config = None - - self.assertEqual( - self.args_create.compute_s3_config( - {'s3': {'use_dualstack_endpoint': 'True'}}, - client_config=None), - 
{'use_dualstack_endpoint': True} - ) - self.assertEqual( - self.args_create.compute_s3_config( - {'s3': {'use_dualstack_endpoint': 'true'}}, - client_config=None), - {'use_dualstack_endpoint': True} - ) - self.assertEqual( - self.args_create.compute_s3_config( - {'s3': {'use_dualstack_endpoint': True}}, - client_config=None), - {'use_dualstack_endpoint': True} - ) - def test_max_pool_from_client_config_forwarded_to_endpoint_creator(self): config = ibm_botocore.config.Config(max_pool_connections=20) with mock.patch('ibm_botocore.args.EndpointCreator') as m: diff --git a/tests/unit/test_client.py b/tests/unit/test_client.py index c5faa6f..6600968 100644 --- a/tests/unit/test_client.py +++ b/tests/unit/test_client.py @@ -22,6 +22,7 @@ from ibm_botocore import hooks from ibm_botocore.client import ClientEndpointBridge from ibm_botocore.credentials import Credentials +from ibm_botocore.configprovider import ConfigValueStore from ibm_botocore.exceptions import ParamValidationError from ibm_botocore.exceptions import InvalidS3AddressingStyleError from ibm_botocore.exceptions import UnknownSignatureVersionError @@ -116,6 +117,7 @@ def setUp(self): } self.resolver.construct_endpoint.return_value = self.endpoint_data self.resolver.get_available_endpoints.return_value = ['us-west-2'] + self.config_store = ConfigValueStore() def tearDown(self): self.endpoint_creator_patch.stop() @@ -133,7 +135,8 @@ def create_client_creator(self, endpoint_creator=None, event_emitter=None, retry_config_translator=None, response_parser_factory=None, endpoint_prefix=None, - exceptions_factory=None): + exceptions_factory=None, + config_store=None): if event_emitter is None: event_emitter = hooks.HierarchicalEmitter() if retry_handler_factory is None: @@ -148,10 +151,14 @@ def create_client_creator(self, endpoint_creator=None, event_emitter=None, self.endpoint_creator_cls.return_value = endpoint_creator if exceptions_factory is None: exceptions_factory = ClientExceptionsFactory() + if config_store 
is None: + config_store = self.config_store creator = client.ClientCreator( self.loader, self.resolver, 'user-agent', event_emitter, retry_handler_factory, retry_config_translator, - response_parser_factory, exceptions_factory) + response_parser_factory, exceptions_factory, + config_store + ) return creator def assert_no_param_error_raised(self, client): @@ -1201,12 +1208,12 @@ def test_client_s3_addressing_style_with_bad_value(self): ) self.assertIsNone(client.meta.config.s3) - def test_client_s3_addressing_style_with_scoped_config(self): - creator = self.create_client_creator() - client = creator.create_client( - 'myservice', 'us-west-2', - scoped_config={'s3': {'addressing_style': 'virtual'}} + def test_client_s3_addressing_style_with_config_store(self): + self.config_store.set_config_variable( + 's3', {'addressing_style': 'virtual'} ) + creator = self.create_client_creator() + client = creator.create_client('myservice', 'us-west-2') self.assertEqual( client.meta.config.s3['addressing_style'], 'virtual') @@ -1214,126 +1221,24 @@ def test_client_s3_addressing_style_with_incorrect_style(self): with self.assertRaises(InvalidS3AddressingStyleError): ibm_botocore.config.Config(s3={'addressing_style': 'foo'}) - def test_client_s3_addressing_style_config_overrides_scoped_config(self): + def test_client_s3_addressing_style_config_overrides_config_store(self): + self.config_store.set_config_variable( + 's3', {'addressing_style': 'virtual'}) creator = self.create_client_creator() my_client = creator.create_client( 'myservice', 'us-west-2', - scoped_config={'s3': {'addressing_style': 'virtual'}}, - client_config=ibm_botocore.config.Config(s3={'addressing_style': 'auto'}) + client_config=ibm_botocore.config.Config( + s3={'addressing_style': 'auto'}) ) self.assertEqual( my_client.meta.config.s3['addressing_style'], 'auto') - def test_client_s3_addressing_style_default_registers_correctly(self): - event_emitter = self.create_mock_emitter() - creator = 
self.create_client_creator(event_emitter=event_emitter) - client = creator.create_client('s3', 'us-west-2') - self.assertIn( - mock.call('before-sign.s3', utils.fix_s3_host), - client.meta.events.register.call_args_list - ) - - def test_client_s3_addressing_style_auto_registers_correctly(self): - event_emitter = self.create_mock_emitter() - creator = self.create_client_creator(event_emitter=event_emitter) - client = creator.create_client( - 's3', 'us-west-2', - scoped_config={'s3': {'addressing_style': 'auto'}} - ) - self.assertIn( - mock.call('before-sign.s3', utils.fix_s3_host), - client.meta.events.register.call_args_list - ) - - def test_client_s3_addressing_style_virtual_registers_correctly(self): - event_emitter = self.create_mock_emitter() - creator = self.create_client_creator(event_emitter=event_emitter) - client = creator.create_client( - 's3', 'us-west-2', - scoped_config={'s3': {'addressing_style': 'virtual'}} - ) - self.assertNotIn( - mock.call('before-sign.s3', utils.fix_s3_host), - client.meta.events.unregister.call_args_list - ) - self.assertIn( - mock.call('before-sign.s3', utils.switch_to_virtual_host_style), - client.meta.events.register.call_args_list - ) - - def test_client_s3_addressing_style_path_registers_correctly(self): - event_emitter = self.create_mock_emitter() - creator = self.create_client_creator(event_emitter=event_emitter) - client = creator.create_client( - 's3', 'us-west-2', - scoped_config={'s3': {'addressing_style': 'path'}} - ) - self.assertNotIn( - mock.call('before-sign.s3', utils.fix_s3_host), - client.meta.events.register.call_args_list - ) - self.assertNotIn( - mock.call('before-sign.s3', utils.switch_to_virtual_host_style), - client.meta.events.register.call_args_list - ) - - def test_custom_endpoint_uses_path_style(self): - event_emitter = self.create_mock_emitter() - creator = self.create_client_creator(event_emitter=event_emitter) - - # fix_s3_host should be registered if we don't provide a url - client = 
creator.create_client('s3', 'us-west-2') - self.assertIn( - mock.call('before-sign.s3', utils.fix_s3_host), - client.meta.events.register.call_args_list - ) - - # If we do provide a url, fix_s3_host should not be registered - event_emitter.reset_mock() - client = creator.create_client( - 's3', 'us-west-2', - endpoint_url="foo.com" - ) - self.assertNotIn( - mock.call('before-sign.s3', mock.ANY), - client.meta.events.register.call_args_list - ) - - def test_custom_accelerate_url_forces_virtual_host(self): - event_emitter = self.create_mock_emitter() - creator = self.create_client_creator(event_emitter=event_emitter) - client = creator.create_client( - 's3', 'us-west-2', - endpoint_url='https://s3-accelerate.amazonaws.com' + def test_client_payload_signing_from_config_store(self): + self.config_store.set_config_variable( + 's3', {'payload_signing_enabled': True} ) - self.assertIn( - mock.call('before-sign.s3', utils.switch_to_virtual_host_style), - client.meta.events.register.call_args_list - ) - - def test_client_payload_signing_from_scoped_config(self): - creator = self.create_client_creator() - my_client = creator.create_client( - 'myservice', 'us-west-2', - scoped_config={'s3': {'payload_signing_enabled': True}} - ) - self.assertEqual( - my_client.meta.config.s3['payload_signing_enabled'], True) - - def test_client_payload_signing_from_varying_forms_of_true(self): - creator = self.create_client_creator() - my_client = creator.create_client( - 'myservice', 'us-west-2', - scoped_config={'s3': {'payload_signing_enabled': 'True'}} - ) - self.assertEqual( - my_client.meta.config.s3['payload_signing_enabled'], True) - creator = self.create_client_creator() - my_client = creator.create_client( - 'myservice', 'us-west-2', - scoped_config={'s3': {'payload_signing_enabled': 'true'}} - ) + my_client = creator.create_client('myservice', 'us-west-2') self.assertEqual( my_client.meta.config.s3['payload_signing_enabled'], True) @@ -1358,29 +1263,12 @@ def 
test_client_payload_signing_client_config_overrides_scoped(self): self.assertEqual( my_client.meta.config.s3['payload_signing_enabled'], True) - def test_client_s3_accelerate_from_scoped_config(self): - creator = self.create_client_creator() - my_client = creator.create_client( - 'myservice', 'us-west-2', - scoped_config={'s3': {'use_accelerate_endpoint': True}} + def test_client_s3_accelerate_from_config_store(self): + self.config_store.set_config_variable( + 's3', {'use_accelerate_endpoint': True} ) - self.assertEqual( - my_client.meta.config.s3['use_accelerate_endpoint'], True) - - def test_client_s3_accelerate_from_varying_forms_of_true(self): creator = self.create_client_creator() - my_client = creator.create_client( - 'myservice', 'us-west-2', - scoped_config={'s3': {'use_accelerate_endpoint': 'True'}} - ) - self.assertEqual( - my_client.meta.config.s3['use_accelerate_endpoint'], True) - - creator = self.create_client_creator() - my_client = creator.create_client( - 'myservice', 'us-west-2', - scoped_config={'s3': {'use_accelerate_endpoint': 'true'}} - ) + my_client = creator.create_client('myservice', 'us-west-2') self.assertEqual( my_client.meta.config.s3['use_accelerate_endpoint'], True) @@ -1394,11 +1282,13 @@ def test_client_s3_accelerate_from_client_config(self): self.assertEqual( my_client.meta.config.s3['use_accelerate_endpoint'], True) - def test_client_s3_accelerate_client_config_overrides_scoped(self): + def test_client_s3_accelerate_client_config_overrides_config_store(self): + self.config_store.set_config_variable( + 's3', {'use_accelerate_endpoint': False} + ) creator = self.create_client_creator() my_client = creator.create_client( 'myservice', 'us-west-2', - scoped_config={'s3': {'use_accelerate_endpoint': False}}, client_config=client.Config(s3={'use_accelerate_endpoint': True}) ) diff --git a/tests/unit/test_config_provider.py b/tests/unit/test_config_provider.py index 559308a..bd443f8 100644 --- a/tests/unit/test_config_provider.py +++ 
b/tests/unit/test_config_provider.py @@ -21,6 +21,7 @@ from ibm_botocore.configprovider import InstanceVarProvider from ibm_botocore.configprovider import EnvironmentProvider from ibm_botocore.configprovider import ScopedConfigProvider +from ibm_botocore.configprovider import SectionConfigProvider from ibm_botocore.configprovider import ConstantProvider from ibm_botocore.configprovider import ChainProvider from ibm_botocore.configprovider import ConfigChainFactory @@ -74,17 +75,152 @@ def test_chain_builder_can_provide_env_var(self): expected_value='from-env', ) + def test_does_provide_none_if_no_variable_exists_in_env_var_list(self): + self.assert_chain_does_provide( + instance_map={}, + environ_map={}, + scoped_config_map={}, + create_config_chain_args={ + 'env_var_names': ['FOO'], + }, + expected_value=None, + ) + + def test_does_provide_value_if_variable_exists_in_env_var_list(self): + self.assert_chain_does_provide( + instance_map={}, + environ_map={'FOO': 'bar'}, + scoped_config_map={}, + create_config_chain_args={ + 'env_var_names': ['FOO'], + }, + expected_value='bar', + ) + + def test_does_provide_first_non_none_value_first_in_env_var_list(self): + self.assert_chain_does_provide( + instance_map={}, + environ_map={'FOO': 'baz'}, + scoped_config_map={}, + create_config_chain_args={ + 'env_var_names': ['FOO', 'BAR'], + }, + expected_value='baz', + ) + + def test_does_provide_first_non_none_value_second_in_env_var_list(self): + self.assert_chain_does_provide( + instance_map={}, + environ_map={'BAR': 'baz'}, + scoped_config_map={}, + create_config_chain_args={ + 'env_var_names': ['FOO', 'BAR'], + }, + expected_value='baz', + ) + + def test_does_provide_none_if_all_list_env_vars_are_none(self): + self.assert_chain_does_provide( + instance_map={}, + environ_map={}, + scoped_config_map={}, + create_config_chain_args={ + 'env_var_names': ['FOO', 'BAR'], + }, + expected_value=None, + ) + + def test_does_provide_first_value_when_both_env_vars_exist(self): + 
self.assert_chain_does_provide( + instance_map={}, + environ_map={'FOO': 'baz', 'BAR': 'buz'}, + scoped_config_map={}, + create_config_chain_args={ + 'env_var_names': ['FOO', 'BAR'], + }, + expected_value='baz', + ) + def test_chain_builder_can_provide_config_var(self): self.assert_chain_does_provide( instance_map={}, environ_map={}, scoped_config_map={'config_var': 'from-config'}, create_config_chain_args={ - 'config_property_name': 'config_var', + 'config_property_names': 'config_var', }, expected_value='from-config', ) + def test_chain_builder_can_provide_nested_config_var(self): + self.assert_chain_does_provide( + instance_map={}, + environ_map={}, + scoped_config_map={'config_var': {'nested-key': 'nested-val'}}, + create_config_chain_args={ + 'config_property_names': ('config_var', 'nested-key'), + }, + expected_value='nested-val', + ) + + def test_provide_value_from_config_list(self): + self.assert_chain_does_provide( + instance_map={}, + environ_map={}, + scoped_config_map={'var': 'val'}, + create_config_chain_args={ + 'config_property_names': ['var'], + }, + expected_value='val', + ) + + def test_provide_value_from_config_list_looks_for_non_none_vals(self): + self.assert_chain_does_provide( + instance_map={}, + environ_map={}, + scoped_config_map={'non_none_var': 'non_none_val'}, + create_config_chain_args={ + 'config_property_names': ['none_var', 'non_none_var'], + }, + expected_value='non_none_val', + ) + + def test_provide_value_from_config_list_retrieves_first_non_none_val(self): + self.assert_chain_does_provide( + instance_map={}, + environ_map={}, + scoped_config_map={ + 'first': 'first_val', + 'second': 'second_val' + }, + create_config_chain_args={ + 'config_property_names': ['first', 'second'], + }, + expected_value='first_val', + ) + + def test_provide_value_from_config_list_if_all_vars_are_none(self): + self.assert_chain_does_provide( + instance_map={}, + environ_map={}, + scoped_config_map={}, + create_config_chain_args={ + 
'config_property_names': ['config1', 'config2'], + }, + expected_value=None, + ) + + def test_provide_value_from_list_with_nested_var(self): + self.assert_chain_does_provide( + instance_map={}, + environ_map={}, + scoped_config_map={'section': {'nested_var': 'nested_val'}}, + create_config_chain_args={ + 'config_property_names': [('section', 'nested_var')], + }, + expected_value='nested_val', + ) + def test_chain_builder_can_provide_default(self): self.assert_chain_does_provide( instance_map={}, @@ -104,7 +240,7 @@ def test_chain_provider_does_follow_priority_instance_var(self): create_config_chain_args={ 'instance_name': 'instance_var', 'env_var_names': 'ENV_VAR', - 'config_property_name': 'config_var', + 'config_property_names': 'config_var', 'default': 'from-default', }, expected_value='from-instance', @@ -118,7 +254,7 @@ def test_chain_provider_does_follow_priority_env_var(self): create_config_chain_args={ 'instance_name': 'instance_var', 'env_var_names': 'ENV_VAR', - 'config_property_name': 'config_var', + 'config_property_names': 'config_var', 'default': 'from-default', }, expected_value='from-env', @@ -132,7 +268,7 @@ def test_chain_provider_does_follow_priority_config(self): create_config_chain_args={ 'instance_name': 'instance_var', 'env_var_names': 'ENV_VAR', - 'config_property_name': 'config_var', + 'config_property_names': 'config_var', 'default': 'from-default', }, expected_value='from-config', @@ -146,7 +282,7 @@ def test_chain_provider_does_follow_priority_default(self): create_config_chain_args={ 'instance_name': 'instance_var', 'env_var_names': 'ENV_VAR', - 'config_property_name': 'config_var', + 'config_property_names': 'config_var', 'default': 'from-default', }, expected_value='from-default', @@ -220,78 +356,27 @@ def test_does_provide_none_if_value_not_in_dict(self): class TestEnvironmentProvider(unittest.TestCase): - def assert_does_provide(self, env, names, expected_value): - provider = EnvironmentProvider(names=names, env=env) + def 
assert_does_provide(self, env, name, expected_value): + provider = EnvironmentProvider(name=name, env=env) value = provider.provide() self.assertEqual(value, expected_value) def test_does_provide_none_if_no_variable_exists(self): self.assert_does_provide( - names='FOO', + name='FOO', env={}, expected_value=None, ) def test_does_provide_value_if_variable_exists(self): self.assert_does_provide( - names='FOO', + name='FOO', env={ 'FOO': 'bar', }, expected_value='bar', ) - def test_does_provide_none_if_no_variable_exists_in_list(self): - self.assert_does_provide( - names=['FOO'], - env={}, - expected_value=None, - ) - - def test_does_provide_value_if_variable_exists_in_list(self): - self.assert_does_provide( - names=['FOO'], - env={ - 'FOO': 'bar', - }, - expected_value='bar', - ) - - def test_does_provide_first_non_none_value_first(self): - self.assert_does_provide( - names=['FOO', 'BAR'], - env={ - 'FOO': 'baz', - }, - expected_value='baz', - ) - - def test_does_provide_first_non_none_value_second(self): - self.assert_does_provide( - names=['FOO', 'BAR'], - env={ - 'BAR': 'baz', - }, - expected_value='baz', - ) - - def test_does_provide_none_if_all_list_variables_are_none(self): - self.assert_does_provide( - names=['FOO', 'BAR'], - env={}, - expected_value=None, - ) - - def test_does_provide_first_value_when_both_exist(self): - self.assert_does_provide( - names=['FOO', 'BAR'], - env={ - 'FOO': 'baz', - 'BAR': 'buz', - }, - expected_value='baz', - ) - class TestScopedConfigProvider(unittest.TestCase): def assert_provides_value(self, config_file_values, config_var_name, @@ -323,6 +408,26 @@ def test_does_provide_none_if_var_not_in_config(self): expected_value=None, ) + def test_provide_nested_value(self): + self.assert_provides_value( + config_file_values={ + 'section': { + 'nested_var': 'nested_val' + } + }, + config_var_name=('section', 'nested_var'), + expected_value='nested_val', + ) + + def test_provide_nested_value_but_not_section(self): + 
self.assert_provides_value( + config_file_values={ + 'section': 'not-nested' + }, + config_var_name=('section', 'nested_var'), + expected_value=None, + ) + def _make_provider_that_returns(return_value): provider = mock.Mock(spec=BaseProvider) @@ -384,3 +489,68 @@ def test_can_provide_value(self): provider = ConstantProvider(value='foo') value = provider.provide() self.assertEqual(value, 'foo') + + +class TestSectionConfigProvider(unittest.TestCase): + def assert_provides_value(self, config_file_values, section_name, + expected_value, override_providers=None): + fake_session = mock.Mock(spec=session.Session) + fake_session.get_scoped_config.return_value = config_file_values + provider = SectionConfigProvider( + section_name=section_name, + session=fake_session, + override_providers=override_providers + ) + value = provider.provide() + self.assertEqual(value, expected_value) + + def test_provide_section_config(self): + self.assert_provides_value( + config_file_values={ + 'mysection': { + 'section_var': 'section_val' + } + }, + section_name='mysection', + expected_value={'section_var': 'section_val'}, + ) + + def test_provide_service_config_missing_service(self): + self.assert_provides_value( + config_file_values={}, + section_name='mysection', + expected_value=None, + ) + + def test_provide_service_config_not_a_section(self): + self.assert_provides_value( + config_file_values={'myservice': 'not-a-section'}, + section_name='mysection', + expected_value=None, + ) + + def test_provide_section_config_with_overrides(self): + self.assert_provides_value( + config_file_values={ + 'mysection': { + 'override_var': 'from_config_file', + 'no_override_var': 'from_config_file' + } + }, + section_name='mysection', + override_providers={'override_var': ConstantProvider('override')}, + expected_value={ + 'override_var': 'override', + 'no_override_var': 'from_config_file' + } + ) + + def test_provide_section_config_with_only_overrides(self): + self.assert_provides_value( + 
config_file_values={}, + section_name='mysection', + override_providers={'override_var': ConstantProvider('override')}, + expected_value={ + 'override_var': 'override', + } + ) diff --git a/tests/unit/test_credentials.py b/tests/unit/test_credentials.py index 48f536f..4384bab 100644 --- a/tests/unit/test_credentials.py +++ b/tests/unit/test_credentials.py @@ -24,13 +24,15 @@ from ibm_botocore import credentials from ibm_botocore.utils import ContainerMetadataFetcher -from ibm_botocore.compat import json +from ibm_botocore.compat import json, six +from ibm_botocore.session import Session +from ibm_botocore.utils import FileWebIdentityTokenLoader from ibm_botocore.credentials import EnvProvider, create_assume_role_refresher from ibm_botocore.credentials import CredentialProvider, AssumeRoleProvider from ibm_botocore.credentials import ConfigProvider, SharedCredentialProvider -from ibm_botocore.credentials import Credentials -from ibm_botocore.configprovider import create_botocore_default_config_mapping -from ibm_botocore.configprovider import ConfigChainFactory +from ibm_botocore.credentials import ProcessProvider +from ibm_botocore.credentials import AssumeRoleWithWebIdentityProvider +from ibm_botocore.credentials import Credentials, ProfileProviderBuilder from ibm_botocore.configprovider import ConfigValueStore import ibm_botocore.exceptions import ibm_botocore.session @@ -691,6 +693,48 @@ def test_envvars_not_found(self): creds = provider.load() self.assertIsNone(creds) + def test_envvars_empty_string(self): + environ = { + 'AWS_ACCESS_KEY_ID': '', + 'AWS_SECRET_ACCESS_KEY': '', + 'AWS_SECURITY_TOKEN': '', + } + provider = credentials.EnvProvider(environ) + creds = provider.load() + self.assertIsNone(creds) + + def test_expiry_omitted_if_envvar_empty(self): + environ = { + 'AWS_ACCESS_KEY_ID': 'foo', + 'AWS_SECRET_ACCESS_KEY': 'bar', + 'AWS_SESSION_TOKEN': 'baz', + 'AWS_CREDENTIAL_EXPIRATION': '', + } + provider = credentials.EnvProvider(environ) + creds = 
provider.load() + # Because we treat empty env vars the same as not being provided, + # we should return static credentials and not a refreshable + # credential. + self.assertNotIsInstance(creds, credentials.RefreshableCredentials) + self.assertEqual(creds.access_key, 'foo') + self.assertEqual(creds.secret_key, 'bar') + self.assertEqual(creds.token, 'baz') + + def test_error_when_expiry_required_but_empty(self): + expiry_time = datetime.now(tzlocal()) - timedelta(hours=1) + environ = { + 'AWS_ACCESS_KEY_ID': 'foo', + 'AWS_SECRET_ACCESS_KEY': 'bar', + 'AWS_CREDENTIAL_EXPIRATION': expiry_time.isoformat(), + } + provider = credentials.EnvProvider(environ) + creds = provider.load() + + del environ['AWS_CREDENTIAL_EXPIRATION'] + + with self.assertRaises(ibm_botocore.exceptions.PartialCredentialsError): + creds.get_frozen_credentials() + def test_can_override_env_var_mapping(self): # We can change the env var provider to # use our specified env var names. @@ -765,6 +809,18 @@ def test_partial_creds_is_an_error(self): with self.assertRaises(ibm_botocore.exceptions.PartialCredentialsError): provider.load() + def test_partial_creds_is_an_error_empty_string(self): + # If the user provides an access key, they must also + # provide a secret key. Not doing so will generate an + # error. 
+ environ = { + 'AWS_ACCESS_KEY_ID': 'foo', + 'AWS_SECRET_ACCESS_KEY': '', + } + provider = credentials.EnvProvider(environ) + with self.assertRaises(ibm_botocore.exceptions.PartialCredentialsError): + provider.load() + def test_missing_access_key_id_raises_error(self): expiry_time = datetime.now(tzlocal()) - timedelta(hours=1) environ = { @@ -1314,15 +1370,7 @@ def setUp(self): 'metadata_service_timeout': 1, 'metadata_service_num_attempts': 1, } - self.fake_env_vars = {} - - chain_builder = ConfigChainFactory( - session=self.session, - environ=self.fake_env_vars, - ) - self.config_loader = ConfigValueStore( - mapping=create_botocore_default_config_mapping(chain_builder) - ) + self.config_loader = ConfigValueStore() for name, value in self.fake_instance_variables.items(): self.config_loader.set_config_variable(name, value) @@ -1362,12 +1410,6 @@ def test_no_profile_checks_env_provider(self): self.assertTrue( any(isinstance(p, EnvProvider) for p in resolver.providers)) - def test_env_provider_added_if_profile_from_env_set(self): - self.fake_env_vars['profile'] = 'profile-from-env' - resolver = credentials.create_credential_resolver(self.session) - self.assertTrue( - any(isinstance(p, EnvProvider) for p in resolver.providers)) - def test_default_cache(self): resolver = credentials.create_credential_resolver(self.session) cache = resolver.get_provider('assume-role').cache @@ -1822,6 +1864,51 @@ def test_external_id_provided(self): client.assume_role.assert_called_with( RoleArn='myrole', ExternalId='myid', RoleSessionName=mock.ANY) + def test_assume_role_with_duration(self): + self.fake_config['profiles']['development']['duration_seconds'] = 7200 + response = { + 'Credentials': { + 'AccessKeyId': 'foo', + 'SecretAccessKey': 'bar', + 'SessionToken': 'baz', + 'Expiration': self.some_future_time().isoformat(), + }, + } + client_creator = self.create_client_creator(with_response=response) + provider = credentials.AssumeRoleProvider( + self.create_config_loader(), 
client_creator, + cache={}, profile_name='development') + + # The credentials won't actually be assumed until they're requested. + provider.load().get_frozen_credentials() + + client = client_creator.return_value + client.assume_role.assert_called_with( + RoleArn='myrole', RoleSessionName=mock.ANY, + DurationSeconds=7200) + + def test_assume_role_with_bad_duration(self): + self.fake_config['profiles']['development']['duration_seconds'] = 'garbage value' + response = { + 'Credentials': { + 'AccessKeyId': 'foo', + 'SecretAccessKey': 'bar', + 'SessionToken': 'baz', + 'Expiration': self.some_future_time().isoformat(), + }, + } + client_creator = self.create_client_creator(with_response=response) + provider = credentials.AssumeRoleProvider( + self.create_config_loader(), client_creator, + cache={}, profile_name='development') + + # The credentials won't actually be assumed until they're requested. + provider.load().get_frozen_credentials() + + client = client_creator.return_value + client.assume_role.assert_called_with( + RoleArn='myrole', RoleSessionName=mock.ANY) + def test_assume_role_with_mfa(self): self.fake_config['profiles']['development']['mfa_serial'] = 'mfa' response = { @@ -2208,6 +2295,55 @@ def test_recursive_assume_role(self): ), ]) + def test_assume_role_with_profile_provider(self): + response = { + 'Credentials': { + 'AccessKeyId': 'foo', + 'SecretAccessKey': 'bar', + 'SessionToken': 'baz', + 'Expiration': self.some_future_time().isoformat() + }, + } + client_creator = self.create_client_creator(with_response=response) + mock_builder = mock.Mock(spec=ProfileProviderBuilder) + mock_builder.providers.return_value = [ProfileProvider('foo-profile')] + + provider = credentials.AssumeRoleProvider( + self.create_config_loader(), + client_creator, cache={}, + profile_name='development', + profile_provider_builder=mock_builder, + ) + + creds = provider.load().get_frozen_credentials() + + self.assertEqual(client_creator.call_count, 1) + 
client_creator.assert_called_with( + 'sts', + aws_access_key_id='foo-profile-access-key', + aws_secret_access_key='foo-profile-secret-key', + aws_session_token='foo-profile-token', + ) + + self.assertEqual(creds.access_key, 'foo') + self.assertEqual(creds.secret_key, 'bar') + self.assertEqual(creds.token, 'baz') + + +class ProfileProvider(object): + METHOD = 'fake' + + def __init__(self, profile_name): + self._profile_name = profile_name + + def load(self): + return Credentials( + '%s-access-key' % self._profile_name, + '%s-secret-key' % self._profile_name, + '%s-token' % self._profile_name, + self.METHOD + ) + class TestJSONCache(unittest.TestCase): def setUp(self): @@ -2529,7 +2665,7 @@ def setUp(self): spec=subprocess.Popen) def create_process_provider(self, profile_name='default'): - provider = credentials.ProcessProvider(profile_name, self.load_config, + provider = ProcessProvider(profile_name, self.load_config, popen=self.popen_mock) return provider @@ -2562,7 +2698,7 @@ def test_can_retrieve_via_process(self): 'AccessKeyId': 'foo', 'SecretAccessKey': 'bar', 'SessionToken': 'baz', - 'Expiration': '2020-01-01T00:00:00Z', + 'Expiration': '2999-01-01T00:00:00Z', }) provider = self.create_process_provider() @@ -2588,7 +2724,7 @@ def test_can_pass_arguments_through(self): 'AccessKeyId': 'foo', 'SecretAccessKey': 'bar', 'SessionToken': 'baz', - 'Expiration': '2020-01-01T00:00:00Z', + 'Expiration': '2999-01-01T00:00:00Z', }) provider = self.create_process_provider() @@ -2655,7 +2791,7 @@ def test_unsupported_version_raises_mismatch(self): 'AccessKeyId': 'foo', 'SecretAccessKey': 'bar', 'SessionToken': 'baz', - 'Expiration': '2020-01-01T00:00:00Z', + 'Expiration': '2999-01-01T00:00:00Z', }) provider = self.create_process_provider() @@ -2672,7 +2808,7 @@ def test_missing_version_in_payload_returned_raises_exception(self): 'AccessKeyId': 'foo', 'SecretAccessKey': 'bar', 'SessionToken': 'baz', - 'Expiration': '2020-01-01T00:00:00Z', + 'Expiration': 
'2999-01-01T00:00:00Z', }) provider = self.create_process_provider() @@ -2689,7 +2825,7 @@ def test_missing_access_key_raises_exception(self): # Missing access key. 'SecretAccessKey': 'bar', 'SessionToken': 'baz', - 'Expiration': '2020-01-01T00:00:00Z', + 'Expiration': '2999-01-01T00:00:00Z', }) provider = self.create_process_provider() @@ -2706,7 +2842,7 @@ def test_missing_secret_key_raises_exception(self): 'AccessKeyId': 'foo', # Missing secret key. 'SessionToken': 'baz', - 'Expiration': '2020-01-01T00:00:00Z', + 'Expiration': '2999-01-01T00:00:00Z', }) provider = self.create_process_provider() @@ -2723,7 +2859,7 @@ def test_missing_session_token(self): 'AccessKeyId': 'foo', 'SecretAccessKey': 'bar', # Missing session token. - 'Expiration': '2020-01-01T00:00:00Z', + 'Expiration': '2999-01-01T00:00:00Z', }) provider = self.create_process_provider() @@ -2772,3 +2908,23 @@ def test_missing_expiration_and_session_token(self): self.assertEqual(creds.secret_key, 'bar') self.assertIsNone(creds.token) self.assertEqual(creds.method, 'custom-process') + + +class TestProfileProviderBuilder(unittest.TestCase): + def setUp(self): + super(TestProfileProviderBuilder, self).setUp() + self.mock_session = mock.Mock(spec=Session) + self.builder = ProfileProviderBuilder(self.mock_session) + + def test_profile_provider_builder_order(self): + providers = self.builder.providers('some-profile') + expected_providers = [ + AssumeRoleWithWebIdentityProvider, + SharedCredentialProvider, + ProcessProvider, + ConfigProvider, + ] + self.assertEqual(len(providers), len(expected_providers)) + zipped_providers = six.moves.zip(providers, expected_providers) + for provider, expected_type in zipped_providers: + self.assertTrue(isinstance(provider, expected_type)) diff --git a/tests/unit/test_endpoint.py b/tests/unit/test_endpoint.py index edf162e..197eb85 100644 --- a/tests/unit/test_endpoint.py +++ b/tests/unit/test_endpoint.py @@ -11,11 +11,9 @@ # ANY KIND, either express or implied. 
See the License for the specific # language governing permissions and limitations under the License. import socket -import requests from tests import unittest from mock import Mock, patch, sentinel -from requests import ConnectionError from ibm_botocore.compat import six from ibm_botocore.awsrequest import AWSRequest @@ -23,12 +21,13 @@ from ibm_botocore.endpoint import EndpointCreator from ibm_botocore.exceptions import EndpointConnectionError from ibm_botocore.exceptions import ConnectionClosedError +from ibm_botocore.exceptions import HTTPClientError from ibm_botocore.httpsession import URLLib3Session from ibm_botocore.model import OperationModel, ServiceId -def request_dict(): - return { +def request_dict(**kwargs): + base = { 'headers': {}, 'body': '', 'url_path': '/', @@ -164,8 +163,8 @@ def test_retry_events_can_alter_behavior(self): def test_retry_on_socket_errors(self): self.event_emitter.emit.side_effect = self.get_emitter_responses( num_retries=1) - self.http_session.send.side_effect = ConnectionError() - with self.assertRaises(ConnectionError): + self.http_session.send.side_effect = HTTPClientError(error='wrapped') + with self.assertRaises(HTTPClientError): self.endpoint.make_request(self._operation, request_dict()) self.assert_events_emitted( self.event_emitter, diff --git a/tests/unit/test_http_session.py b/tests/unit/test_http_session.py index 4c0fe48..2804eb1 100644 --- a/tests/unit/test_http_session.py +++ b/tests/unit/test_http_session.py @@ -102,7 +102,7 @@ def assert_request_sent(self, headers=None, body=None, url='/', chunked=False): url=url, body=body, headers=headers, - retries=False, + retries=ANY, assert_same_host=False, preload_content=False, decode_content=False, @@ -267,3 +267,19 @@ def test_aws_connection_classes_are_used(self): self.assertIs(http_class, AWSHTTPConnectionPool) https_class = self.pool_manager.pool_classes_by_scheme.get('https') self.assertIs(https_class, AWSHTTPSConnectionPool) + + def 
test_chunked_encoding_is_set_with_header(self): + session = URLLib3Session() + self.request.headers['Transfer-Encoding'] = 'chunked' + + session.send(self.request.prepare()) + self.assert_request_sent( + chunked=True, + headers={'Transfer-Encoding': 'chunked'}, + ) + + def test_chunked_encoding_is_not_set_without_header(self): + session = URLLib3Session() + + session.send(self.request.prepare()) + self.assert_request_sent(chunked=False) diff --git a/tests/unit/test_session.py b/tests/unit/test_session.py index 1ec2db4..bead8c5 100644 --- a/tests/unit/test_session.py +++ b/tests/unit/test_session.py @@ -60,12 +60,12 @@ def setUp(self): 'region': config_chain_builder.create_config_chain( instance_name='region', env_var_names='FOO_REGION', - config_property_name='foo_region', + config_property_names='foo_region', ), 'data_path': config_chain_builder.create_config_chain( instance_name='data_path', env_var_names='FOO_DATA_PATH', - config_property_name='data_path', + config_property_names='data_path', ), 'config_file': config_chain_builder.create_config_chain( instance_name='config_file', @@ -78,11 +78,11 @@ def setUp(self): 'ca_bundle': config_chain_builder.create_config_chain( instance_name='ca_bundle', env_var_names='FOO_AWS_CA_BUNDLE', - config_property_name='foo_ca_bundle', + config_property_names='foo_ca_bundle', ), 'api_versions': config_chain_builder.create_config_chain( instance_name='api_versions', - config_property_name='foo_api_versions', + config_property_names='foo_api_versions', default={}, ), } diff --git a/tests/unit/test_signers.py b/tests/unit/test_signers.py old mode 100644 new mode 100755 index c297ef9..3b09daf --- a/tests/unit/test_signers.py +++ b/tests/unit/test_signers.py @@ -19,6 +19,7 @@ import ibm_botocore import ibm_botocore.session import ibm_botocore.auth +import ibm_botocore.awsrequest from ibm_botocore.config import Config from ibm_botocore.credentials import Credentials from ibm_botocore.credentials import ReadOnlyCredentials @@ -43,6 
+44,7 @@ def setUp(self): ServiceId('service_name'), 'region_name', 'signing_name', 'v4', self.credentials, self.emitter) self.fixed_credentials = self.credentials.get_frozen_credentials() + self.request = ibm_botocore.awsrequest.AWSRequest() class TestSigner(BaseSignerTest): @@ -63,7 +65,7 @@ def test_region_required_for_sigv4(self): ) with self.assertRaises(NoRegionError): - self.signer.sign('operation_name', mock.Mock()) + self.signer.sign('operation_name', self.request) def test_get_auth(self): auth_cls = mock.Mock() @@ -96,11 +98,9 @@ def test_get_auth_bad_override(self): signature_version='bad') def test_emits_choose_signer(self): - request = mock.Mock() - with mock.patch.dict(ibm_botocore.auth.AUTH_TYPE_MAPS, {'v4': mock.Mock()}): - self.signer.sign('operation_name', request) + self.signer.sign('operation_name', self.request) self.emitter.emit_until_response.assert_called_with( 'choose-signer.service_name.operation_name', @@ -108,41 +108,37 @@ def test_emits_choose_signer(self): signature_version='v4', context=mock.ANY) def test_choose_signer_override(self): - request = mock.Mock() auth = mock.Mock() auth.REQUIRES_REGION = False self.emitter.emit_until_response.return_value = (None, 'custom') with mock.patch.dict(ibm_botocore.auth.AUTH_TYPE_MAPS, {'custom': auth}): - self.signer.sign('operation_name', request) + self.signer.sign('operation_name', self.request) auth.assert_called_with(credentials=self.fixed_credentials) - auth.return_value.add_auth.assert_called_with(request) + auth.return_value.add_auth.assert_called_with(self.request) def test_emits_before_sign(self): - request = mock.Mock() - with mock.patch.dict(ibm_botocore.auth.AUTH_TYPE_MAPS, {'v4': mock.Mock()}): - self.signer.sign('operation_name', request) + self.signer.sign('operation_name', self.request) self.emitter.emit.assert_called_with( 'before-sign.service_name.operation_name', - request=mock.ANY, signing_name='signing_name', + request=self.request, signing_name='signing_name', 
region_name='region_name', signature_version='v4', request_signer=self.signer, operation_name='operation_name') def test_disable_signing(self): # Returning ibm_botocore.UNSIGNED from choose-signer disables signing! - request = mock.Mock() auth = mock.Mock() self.emitter.emit_until_response.return_value = (None, ibm_botocore.UNSIGNED) with mock.patch.dict(ibm_botocore.auth.AUTH_TYPE_MAPS, {'v4': auth}): - self.signer.sign('operation_name', request) + self.signer.sign('operation_name', self.request) auth.assert_not_called() @@ -166,12 +162,11 @@ def test_generate_url_emits_choose_signer(self): signature_version='v4-query', context=mock.ANY) def test_choose_signer_passes_context(self): - request = mock.Mock() - request.context = {'foo': 'bar'} + self.request.context = {'foo': 'bar'} with mock.patch.dict(ibm_botocore.auth.AUTH_TYPE_MAPS, {'v4': mock.Mock()}): - self.signer.sign('operation_name', request) + self.signer.sign('operation_name', self.request) self.emitter.emit_until_response.assert_called_with( 'choose-signer.service_name.operation_name', @@ -341,14 +336,13 @@ def test_sign_with_signing_type_standard(self): auth = mock.Mock() post_auth = mock.Mock() query_auth = mock.Mock() - request = mock.Mock() auth_types = { 'v4-presign-post': post_auth, 'v4-query': query_auth, 'v4': auth } with mock.patch.dict(ibm_botocore.auth.AUTH_TYPE_MAPS, auth_types): - self.signer.sign('operation_name', request, + self.signer.sign('operation_name', self.request, signing_type='standard') self.assertFalse(post_auth.called) self.assertFalse(query_auth.called) @@ -362,14 +356,13 @@ def test_sign_with_signing_type_presign_url(self): auth = mock.Mock() post_auth = mock.Mock() query_auth = mock.Mock() - request = mock.Mock() auth_types = { 'v4-presign-post': post_auth, 'v4-query': query_auth, 'v4': auth } with mock.patch.dict(ibm_botocore.auth.AUTH_TYPE_MAPS, auth_types): - self.signer.sign('operation_name', request, + self.signer.sign('operation_name', self.request, 
signing_type='presign-url') self.assertFalse(post_auth.called) self.assertFalse(auth.called) @@ -383,14 +376,13 @@ def test_sign_with_signing_type_presign_post(self): auth = mock.Mock() post_auth = mock.Mock() query_auth = mock.Mock() - request = mock.Mock() auth_types = { 'v4-presign-post': post_auth, 'v4-query': query_auth, 'v4': auth } with mock.patch.dict(ibm_botocore.auth.AUTH_TYPE_MAPS, auth_types): - self.signer.sign('operation_name', request, + self.signer.sign('operation_name', self.request, signing_type='presign-post') self.assertFalse(auth.called) self.assertFalse(query_auth.called) @@ -401,27 +393,54 @@ def test_sign_with_signing_type_presign_post(self): ) def test_sign_with_region_name(self): - request = mock.Mock() auth = mock.Mock() auth_types = { 'v4': auth } with mock.patch.dict(ibm_botocore.auth.AUTH_TYPE_MAPS, auth_types): - self.signer.sign('operation_name', request, region_name='foo') + self.signer.sign('operation_name', self.request, region_name='foo') auth.assert_called_with( credentials=ReadOnlyCredentials('key', 'secret', None), service_name='signing_name', region_name='foo' ) + def test_sign_override_region_from_context(self): + auth = mock.Mock() + auth_types = { + 'v4': auth + } + self.request.context = {'signing': {'region': 'my-override-region'}} + with mock.patch.dict(ibm_botocore.auth.AUTH_TYPE_MAPS, auth_types): + self.signer.sign('operation_name', self.request) + auth.assert_called_with( + credentials=ReadOnlyCredentials('key', 'secret', None), + service_name='signing_name', + region_name='my-override-region' + ) + + def test_sign_with_region_name_overrides_context(self): + auth = mock.Mock() + auth_types = { + 'v4': auth + } + self.request.context = {'signing': {'region': 'context-override'}} + with mock.patch.dict(ibm_botocore.auth.AUTH_TYPE_MAPS, auth_types): + self.signer.sign('operation_name', self.request, + region_name='param-override') + auth.assert_called_with( + credentials=ReadOnlyCredentials('key', 'secret', None), + 
service_name='signing_name', + region_name='param-override' + ) + def test_sign_with_expires_in(self): - request = mock.Mock() auth = mock.Mock() auth_types = { 'v4': auth } with mock.patch.dict(ibm_botocore.auth.AUTH_TYPE_MAPS, auth_types): - self.signer.sign('operation_name', request, expires_in=2) + self.signer.sign('operation_name', self.request, expires_in=2) auth.assert_called_with( credentials=ReadOnlyCredentials('key', 'secret', None), service_name='signing_name', @@ -430,13 +449,13 @@ def test_sign_with_expires_in(self): ) def test_sign_with_custom_signing_name(self): - request = mock.Mock() auth = mock.Mock() auth_types = { 'v4': auth } with mock.patch.dict(ibm_botocore.auth.AUTH_TYPE_MAPS, auth_types): - self.signer.sign('operation_name', request, signing_name='foo') + self.signer.sign('operation_name', self.request, + signing_name='foo') auth.assert_called_with( credentials=ReadOnlyCredentials('key', 'secret', None), service_name='foo', @@ -467,7 +486,6 @@ def test_presign_with_custom_signing_name(self): self.assertEqual(presigned_url, 'https://foo.com') def test_unknown_signer_raises_unknown_on_standard(self): - request = mock.Mock() auth = mock.Mock() auth_types = { 'v4': auth @@ -475,11 +493,10 @@ def test_unknown_signer_raises_unknown_on_standard(self): self.emitter.emit_until_response.return_value = (None, 'custom') with mock.patch.dict(ibm_botocore.auth.AUTH_TYPE_MAPS, auth_types): with self.assertRaises(UnknownSignatureVersionError): - self.signer.sign('operation_name', request, + self.signer.sign('operation_name', self.request, signing_type='standard') def test_unknown_signer_raises_unsupported_when_not_standard(self): - request = mock.Mock() auth = mock.Mock() auth_types = { 'v4': auth @@ -487,11 +504,11 @@ def test_unknown_signer_raises_unsupported_when_not_standard(self): self.emitter.emit_until_response.return_value = (None, 'custom') with mock.patch.dict(ibm_botocore.auth.AUTH_TYPE_MAPS, auth_types): with 
self.assertRaises(UnsupportedSignatureVersionError): - self.signer.sign('operation_name', request, + self.signer.sign('operation_name', self.request, signing_type='presign-url') with self.assertRaises(UnsupportedSignatureVersionError): - self.signer.sign('operation_name', request, + self.signer.sign('operation_name', self.request, signing_type='presign-post') @@ -921,54 +938,7 @@ def test_generate_presigned_post_with_prefilled(self): def test_generate_presigned_post_non_s3_client(self): self.client = self.session.create_client('s3', 'us-west-2') - with self.assertRaises(TypeError): + with self.assertRaises(AttributeError): self.client.generate_presigned_post() -class TestGenerateDBAuthToken(BaseSignerTest): - maxDiff = None - - def setUp(self): - self.session = ibm_botocore.session.get_session() - self.client = self.session.create_client( - 's3', region_name='us-east-1', aws_access_key_id='akid', - aws_secret_access_key='skid', config=Config(signature_version='v4') - ) - - def test_generate_db_auth_token(self): - hostname = 'prod-instance.us-east-1.rds.amazonaws.com' - port = 3306 - username = 'someusername' - clock = datetime.datetime(2016, 11, 7, 17, 39, 33, tzinfo=tzutc()) - - with mock.patch('datetime.datetime') as dt: - dt.utcnow.return_value = clock - result = generate_db_auth_token( - self.client, hostname, port, username) - - expected_result = ( - 'prod-instance.us-east-1.rds.amazonaws.com:3306/?Action=connect' - '&DBUser=someusername&X-Amz-Algorithm=AWS4-HMAC-SHA256' - '&X-Amz-Date=20161107T173933Z&X-Amz-SignedHeaders=host' - '&X-Amz-Expires=900&X-Amz-Credential=akid%2F20161107%2F' - 'us-east-1%2Frds-db%2Faws4_request&X-Amz-Signature' - '=d1138cdbc0ca63eec012ec0fc6c2267e03642168f5884a7795320d4c18374c61' - ) - - # A scheme needs to be appended to the beginning or urlsplit may fail - # on certain systems. 
- assert_url_equal( - 'https://' + result, 'https://' + expected_result) - - def test_custom_region(self): - hostname = 'host.us-east-1.rds.amazonaws.com' - port = 3306 - username = 'mySQLUser' - region = 'us-west-2' - result = generate_db_auth_token( - self.client, hostname, port, username, Region=region) - - self.assertIn(region, result) - # The hostname won't be changed even if a different region is specified - self.assertIn(hostname, result) - diff --git a/tests/unit/test_utils.py b/tests/unit/test_utils.py index 20dc973..04ffacd 100644 --- a/tests/unit/test_utils.py +++ b/tests/unit/test_utils.py @@ -26,7 +26,13 @@ from ibm_botocore.exceptions import InvalidExpressionError, ConfigNotFound from ibm_botocore.exceptions import ClientError, ConnectionClosedError from ibm_botocore.exceptions import InvalidDNSNameError, MetadataRetrievalError +from ibm_botocore.exceptions import ReadTimeoutError +from ibm_botocore.exceptions import ConnectTimeoutError +from ibm_botocore.exceptions import UnsupportedS3ArnError +from ibm_botocore.exceptions import UnsupportedS3AccesspointConfigurationError from ibm_botocore.model import ServiceModel +from ibm_botocore.model import OperationModel +from ibm_botocore.regions import EndpointResolver from ibm_botocore.utils import ensure_boolean from ibm_botocore.utils import is_json_value_header from ibm_botocore.utils import remove_dot_segments @@ -54,9 +60,14 @@ from ibm_botocore.utils import switch_host_s3_accelerate from ibm_botocore.utils import deep_merge from ibm_botocore.utils import S3RegionRedirector +from ibm_botocore.utils import InvalidArnException +from ibm_botocore.utils import ArnParser +from ibm_botocore.utils import S3ArnParamHandler +from ibm_botocore.utils import S3EndpointSetter from ibm_botocore.utils import ContainerMetadataFetcher -from ibm_botocore.utils import IMDSFetcher from ibm_botocore.utils import InstanceMetadataFetcher +from ibm_botocore.utils import IMDSFetcher +from ibm_botocore.utils import 
BadIMDSRequestError from ibm_botocore.model import DenormalizedStructureBuilder from ibm_botocore.model import ShapeResolver from ibm_botocore.config import Config @@ -1582,6 +1593,355 @@ def test_get_region_from_head_bucket_success(self): region = self.redirector.get_bucket_region('foo', response) self.assertEqual(region, 'eu-central-1') + def test_no_redirect_from_error_for_accesspoint(self): + request_dict = { + 'url': ( + 'https://myendpoint-123456789012.s3-accesspoint.' + 'us-west-2.amazonaws.com/key' + ), + 'context': { + 's3_accesspoint': {} + } + } + response = (None, { + 'Error': {'Code': '400', 'Message': 'Bad Request'}, + 'ResponseMetadata': { + 'HTTPHeaders': {'x-amz-bucket-region': 'eu-central-1'} + } + }) + + self.operation.name = 'HeadObject' + redirect_response = self.redirector.redirect_from_error( + request_dict, response, self.operation) + self.assertEqual(redirect_response, None) + + def test_no_redirect_from_cache_for_accesspoint(self): + self.cache['foo'] = {'endpoint': 'foo-endpoint'} + self.redirector = S3RegionRedirector( + self.endpoint_bridge, self.client, cache=self.cache) + params = {'Bucket': 'foo'} + context = {'s3_accesspoint': {}} + self.redirector.redirect_from_cache(params, context) + self.assertNotIn('signing', context) + + +class TestArnParser(unittest.TestCase): + def setUp(self): + self.parser = ArnParser() + + def test_parse(self): + arn = 'arn:aws:s3:us-west-2:1023456789012:myresource' + self.assertEqual( + self.parser.parse_arn(arn), + { + 'partition': 'aws', + 'service': 's3', + 'region': 'us-west-2', + 'account': '1023456789012', + 'resource': 'myresource', + } + ) + + def test_parse_invalid_arn(self): + with self.assertRaises(InvalidArnException): + self.parser.parse_arn('arn:aws:s3') + + def test_parse_arn_with_resource_type(self): + arn = 'arn:aws:s3:us-west-2:1023456789012:bucket_name:mybucket' + self.assertEqual( + self.parser.parse_arn(arn), + { + 'partition': 'aws', + 'service': 's3', + 'region': 'us-west-2', + 
'account': '1023456789012', + 'resource': 'bucket_name:mybucket', + } + ) + + def test_parse_arn_with_empty_elements(self): + arn = 'arn:aws:s3:::mybucket' + self.assertEqual( + self.parser.parse_arn(arn), + { + 'partition': 'aws', + 'service': 's3', + 'region': '', + 'account': '', + 'resource': 'mybucket', + } + ) + + +class TestS3ArnParamHandler(unittest.TestCase): + def setUp(self): + self.arn_handler = S3ArnParamHandler() + self.model = mock.Mock(OperationModel) + self.model.name = 'GetObject' + + def test_register(self): + event_emitter = mock.Mock() + self.arn_handler.register(event_emitter) + event_emitter.register.assert_called_with( + 'before-parameter-build.s3', self.arn_handler.handle_arn) + + def test_accesspoint_arn(self): + params = { + 'Bucket': 'arn:aws:s3:us-west-2:123456789012:accesspoint/endpoint' + } + context = {} + self.arn_handler.handle_arn(params, self.model, context) + self.assertEqual(params, {'Bucket': 'endpoint'}) + self.assertEqual( + context, + { + 's3_accesspoint': { + 'name': 'endpoint', + 'account': '123456789012', + 'region': 'us-west-2', + 'partition': 'aws', + } + } + ) + + def test_accesspoint_arn_with_colon(self): + params = { + 'Bucket': 'arn:aws:s3:us-west-2:123456789012:accesspoint:endpoint' + } + context = {} + self.arn_handler.handle_arn(params, self.model, context) + self.assertEqual(params, {'Bucket': 'endpoint'}) + self.assertEqual( + context, + { + 's3_accesspoint': { + 'name': 'endpoint', + 'account': '123456789012', + 'region': 'us-west-2', + 'partition': 'aws', + } + } + ) + + def test_errors_for_non_accesspoint_arn(self): + params = { + 'Bucket': 'arn:aws:s3:us-west-2:123456789012:unsupported:resource' + } + context = {} + with self.assertRaises(UnsupportedS3ArnError): + self.arn_handler.handle_arn(params, self.model, context) + + def test_ignores_bucket_names(self): + params = {'Bucket': 'mybucket'} + context = {} + self.arn_handler.handle_arn(params, self.model, context) + self.assertEqual(params, {'Bucket': 
'mybucket'}) + self.assertEqual(context, {}) + + def test_ignores_create_bucket(self): + arn = 'arn:aws:s3:us-west-2:123456789012:accesspoint/endpoint' + params = {'Bucket': arn} + context = {} + self.model.name = 'CreateBucket' + self.arn_handler.handle_arn(params, self.model, context) + self.assertEqual(params, {'Bucket': arn}) + self.assertEqual(context, {}) + + +class TestS3EndpointSetter(unittest.TestCase): + def setUp(self): + self.operation_name = 'GetObject' + self.signature_version = 's3v4' + self.region_name = 'us-west-2' + self.account = '123456789012' + self.bucket = 'mybucket' + self.key = 'key.txt' + self.accesspoint_name = 'myaccesspoint' + self.partition = 'aws' + self.endpoint_resolver = mock.Mock() + self.dns_suffix = 'amazonaws.com' + self.endpoint_resolver.construct_endpoint.return_value = { + 'dnsSuffix': self.dns_suffix + } + self.endpoint_setter = self.get_endpoint_setter() + + def get_endpoint_setter(self, **kwargs): + setter_kwargs = { + 'endpoint_resolver': self.endpoint_resolver, + 'region': self.region_name, + } + setter_kwargs.update(kwargs) + return S3EndpointSetter(**setter_kwargs) + + def get_s3_request(self, bucket=None, key=None, scheme='https://', + querystring=None): + url = scheme + 's3.us-west-2.amazonaws.com/' + if bucket: + url += bucket + if key: + url += '/%s' % key + if querystring: + url += '?%s' % querystring + return AWSRequest(method='GET', headers={}, url=url) + + def get_s3_accesspoint_request(self, accesspoint_name=None, + accesspoint_context=None, + **s3_request_kwargs): + if not accesspoint_name: + accesspoint_name = self.accesspoint_name + request = self.get_s3_request(accesspoint_name, **s3_request_kwargs) + if accesspoint_context is None: + accesspoint_context = self.get_s3_accesspoint_context( + name=accesspoint_name) + request.context['s3_accesspoint'] = accesspoint_context + return request + + def get_s3_accesspoint_context(self, **overrides): + accesspoint_context = { + 'name': self.accesspoint_name, + 
'account': self.account, + 'region': self.region_name, + 'partition': self.partition, + } + accesspoint_context.update(overrides) + return accesspoint_context + + def call_set_endpoint(self, endpoint_setter, request, **kwargs): + set_endpoint_kwargs = { + 'request': request, + 'operation_name': self.operation_name, + 'signature_version': self.signature_version, + 'region_name': self.region_name, + } + set_endpoint_kwargs.update(kwargs) + endpoint_setter.set_endpoint(**set_endpoint_kwargs) + + def test_register(self): + event_emitter = mock.Mock() + self.endpoint_setter.register(event_emitter) + event_emitter.register.assert_called_with( + 'before-sign.s3', self.endpoint_setter.set_endpoint) + + def test_accesspoint_endpoint(self): + request = self.get_s3_accesspoint_request() + self.call_set_endpoint(self.endpoint_setter, request=request) + expected_url = 'https://%s-%s.s3-accesspoint.%s.amazonaws.com/' % ( + self.accesspoint_name, self.account, self.region_name + ) + self.assertEqual(request.url, expected_url) + + def test_accesspoint_preserves_key_in_path(self): + request = self.get_s3_accesspoint_request(key=self.key) + self.call_set_endpoint(self.endpoint_setter, request=request) + expected_url = 'https://%s-%s.s3-accesspoint.%s.amazonaws.com/%s' % ( + self.accesspoint_name, self.account, self.region_name, + self.key + ) + self.assertEqual(request.url, expected_url) + + def test_accesspoint_preserves_scheme(self): + request = self.get_s3_accesspoint_request(scheme='http://') + self.call_set_endpoint(self.endpoint_setter, request=request) + expected_url = 'http://%s-%s.s3-accesspoint.%s.amazonaws.com/' % ( + self.accesspoint_name, self.account, self.region_name, + ) + self.assertEqual(request.url, expected_url) + + def test_accesspoint_preserves_query_string(self): + request = self.get_s3_accesspoint_request(querystring='acl') + self.call_set_endpoint(self.endpoint_setter, request=request) + expected_url = 'https://%s-%s.s3-accesspoint.%s.amazonaws.com/?acl' % ( 
+ self.accesspoint_name, self.account, self.region_name, + ) + self.assertEqual(request.url, expected_url) + + def test_uses_resolved_dns_suffix(self): + self.endpoint_resolver.construct_endpoint.return_value = { + 'dnsSuffix': 'mysuffix.com' + } + request = self.get_s3_accesspoint_request() + self.call_set_endpoint(self.endpoint_setter, request=request) + expected_url = 'https://%s-%s.s3-accesspoint.%s.mysuffix.com/' % ( + self.accesspoint_name, self.account, self.region_name, + ) + self.assertEqual(request.url, expected_url) + + def test_uses_region_of_client_if_use_arn_disabled(self): + client_region = 'client-region' + self.endpoint_setter = self.get_endpoint_setter( + region=client_region, s3_config={'use_arn_region': False}) + request = self.get_s3_accesspoint_request() + self.call_set_endpoint(self.endpoint_setter, request=request) + expected_url = 'https://%s-%s.s3-accesspoint.%s.amazonaws.com/' % ( + self.accesspoint_name, self.account, client_region, + ) + self.assertEqual(request.url, expected_url) + + def test_accesspoint_errors_for_custom_endpoint(self): + endpoint_setter = self.get_endpoint_setter( + endpoint_url='https://custom.com') + request = self.get_s3_accesspoint_request() + with self.assertRaises(UnsupportedS3AccesspointConfigurationError): + self.call_set_endpoint(endpoint_setter, request=request) + + def test_errors_for_mismatching_partition(self): + endpoint_setter = self.get_endpoint_setter(partition='aws-cn') + accesspoint_context = self.get_s3_accesspoint_context(partition='aws') + request = self.get_s3_accesspoint_request( + accesspoint_context=accesspoint_context) + with self.assertRaises(UnsupportedS3AccesspointConfigurationError): + self.call_set_endpoint(endpoint_setter, request=request) + + def test_errors_for_mismatching_partition_when_using_client_region(self): + endpoint_setter = self.get_endpoint_setter( + s3_config={'use_arn_region': False}, partition='aws-cn' + ) + accesspoint_context = 
self.get_s3_accesspoint_context(partition='aws') + request = self.get_s3_accesspoint_request( + accesspoint_context=accesspoint_context) + with self.assertRaises(UnsupportedS3AccesspointConfigurationError): + self.call_set_endpoint(endpoint_setter, request=request) + + def test_set_endpoint_for_auto(self): + endpoint_setter = self.get_endpoint_setter( + s3_config={'addressing_style': 'auto'}) + request = self.get_s3_request(self.bucket, self.key) + self.call_set_endpoint(endpoint_setter, request) + expected_url = 'https://%s.s3.us-west-2.amazonaws.com/%s' % ( + self.bucket, self.key + ) + self.assertEqual(request.url, expected_url) + + def test_set_endpoint_for_virtual(self): + endpoint_setter = self.get_endpoint_setter( + s3_config={'addressing_style': 'virtual'}) + request = self.get_s3_request(self.bucket, self.key) + self.call_set_endpoint(endpoint_setter, request) + expected_url = 'https://%s.s3.us-west-2.amazonaws.com/%s' % ( + self.bucket, self.key + ) + self.assertEqual(request.url, expected_url) + + def test_set_endpoint_for_path(self): + endpoint_setter = self.get_endpoint_setter( + s3_config={'addressing_style': 'path'}) + request = self.get_s3_request(self.bucket, self.key) + self.call_set_endpoint(endpoint_setter, request) + expected_url = 'https://s3.us-west-2.amazonaws.com/%s/%s' % ( + self.bucket, self.key + ) + self.assertEqual(request.url, expected_url) + + def test_set_endpoint_for_accelerate(self): + endpoint_setter = self.get_endpoint_setter( + s3_config={'use_accelerate_endpoint': True}) + request = self.get_s3_request(self.bucket, self.key) + self.call_set_endpoint(endpoint_setter, request) + expected_url = 'https://%s.s3-accelerate.amazonaws.com/%s' % ( + self.bucket, self.key + ) + self.assertEqual(request.url, expected_url) + class TestContainerMetadataFetcher(unittest.TestCase): def setUp(self): @@ -1784,6 +2144,13 @@ def setUp(self): 'Token': 'spam-token', 'Expiration': 'something', } + self._expected_creds = { + 'access_key': 
self._creds['AccessKeyId'], + 'secret_key': self._creds['SecretAccessKey'], + 'token': self._creds['Token'], + 'expiry_time': self._creds['Expiration'], + 'role_name': self._role_name + } def tearDown(self): self._urllib3_patch.stop() @@ -1807,6 +2174,13 @@ def add_get_credentials_imds_response(self, creds=None): creds = self._creds self.add_imds_response(body=json.dumps(creds).encode('utf-8')) + def add_get_token_imds_response(self, token, status_code=200): + self.add_imds_response(body=token.encode('utf-8'), + status_code=status_code) + + def add_metadata_token_not_supported_response(self): + self.add_imds_response(b'', status_code=404) + def add_imds_connection_error(self, exception): self._imds_responses.append(exception) @@ -1834,84 +2208,63 @@ def test_disabling_env_var_not_true(self): url = 'https://example.com/' env = {'AWS_EC2_METADATA_DISABLED': 'false'} + self.add_get_token_imds_response(token='token') self.add_get_role_name_imds_response() self.add_get_credentials_imds_response() fetcher = InstanceMetadataFetcher(base_url=url, env=env) result = fetcher.retrieve_iam_role_credentials() - expected_result = { - 'access_key': self._creds['AccessKeyId'], - 'secret_key': self._creds['SecretAccessKey'], - 'token': self._creds['Token'], - 'expiry_time': self._creds['Expiration'], - 'role_name': self._role_name - } - self.assertEqual(result, expected_result) + self.assertEqual(result, self._expected_creds) def test_includes_user_agent_header(self): user_agent = 'my-user-agent' + self.add_get_token_imds_response(token='token') self.add_get_role_name_imds_response() self.add_get_credentials_imds_response() InstanceMetadataFetcher( user_agent=user_agent).retrieve_iam_role_credentials() - headers = self._send.call_args[0][0].headers - self.assertEqual(headers['User-Agent'], user_agent) + self.assertEqual(self._send.call_count, 3) + for call in self._send.calls: + self.assertTrue(call[0][0].headers['User-Agent'], user_agent) def 
test_non_200_response_for_role_name_is_retried(self): # Response for role name that have a non 200 status code should # be retried. + self.add_get_token_imds_response(token='token') self.add_imds_response( status_code=429, body=b'{"message": "Slow down"}') self.add_get_role_name_imds_response() self.add_get_credentials_imds_response() result = InstanceMetadataFetcher( num_attempts=2).retrieve_iam_role_credentials() - expected_result = { - 'access_key': self._creds['AccessKeyId'], - 'secret_key': self._creds['SecretAccessKey'], - 'token': self._creds['Token'], - 'expiry_time': self._creds['Expiration'], - 'role_name': self._role_name - } - self.assertEqual(result, expected_result) + self.assertEqual(result, self._expected_creds) def test_http_connection_error_for_role_name_is_retried(self): # Connection related errors should be retried + self.add_get_token_imds_response(token='token') self.add_imds_connection_error(ConnectionClosedError(endpoint_url='')) self.add_get_role_name_imds_response() self.add_get_credentials_imds_response() result = InstanceMetadataFetcher( num_attempts=2).retrieve_iam_role_credentials() - expected_result = { - 'access_key': self._creds['AccessKeyId'], - 'secret_key': self._creds['SecretAccessKey'], - 'token': self._creds['Token'], - 'expiry_time': self._creds['Expiration'], - 'role_name': self._role_name - } - self.assertEqual(result, expected_result) + self.assertEqual(result, self._expected_creds) def test_empty_response_for_role_name_is_retried(self): # Response for role name that have a non 200 status code should # be retried. 
+ self.add_get_token_imds_response(token='token') self.add_imds_response(body=b'') self.add_get_role_name_imds_response() self.add_get_credentials_imds_response() result = InstanceMetadataFetcher( num_attempts=2).retrieve_iam_role_credentials() - expected_result = { - 'access_key': self._creds['AccessKeyId'], - 'secret_key': self._creds['SecretAccessKey'], - 'token': self._creds['Token'], - 'expiry_time': self._creds['Expiration'], - 'role_name': self._role_name - } - self.assertEqual(result, expected_result) + self.assertEqual(result, self._expected_creds) def test_non_200_response_is_retried(self): + self.add_get_token_imds_response(token='token') self.add_get_role_name_imds_response() # Response for creds that has a 200 status code but has an empty # body should be retried. @@ -1920,32 +2273,20 @@ def test_non_200_response_is_retried(self): self.add_get_credentials_imds_response() result = InstanceMetadataFetcher( num_attempts=2).retrieve_iam_role_credentials() - expected_result = { - 'access_key': self._creds['AccessKeyId'], - 'secret_key': self._creds['SecretAccessKey'], - 'token': self._creds['Token'], - 'expiry_time': self._creds['Expiration'], - 'role_name': self._role_name - } - self.assertEqual(result, expected_result) + self.assertEqual(result, self._expected_creds) def test_http_connection_errors_is_retried(self): + self.add_get_token_imds_response(token='token') self.add_get_role_name_imds_response() # Connection related errors should be retried self.add_imds_connection_error(ConnectionClosedError(endpoint_url='')) self.add_get_credentials_imds_response() result = InstanceMetadataFetcher( num_attempts=2).retrieve_iam_role_credentials() - expected_result = { - 'access_key': self._creds['AccessKeyId'], - 'secret_key': self._creds['SecretAccessKey'], - 'token': self._creds['Token'], - 'expiry_time': self._creds['Expiration'], - 'role_name': self._role_name - } - self.assertEqual(result, expected_result) + self.assertEqual(result, self._expected_creds) def 
test_empty_response_is_retried(self): + self.add_get_token_imds_response(token='token') self.add_get_role_name_imds_response() # Response for creds that has a 200 status code but is empty. # This should be retried. @@ -1953,16 +2294,10 @@ def test_empty_response_is_retried(self): self.add_get_credentials_imds_response() result = InstanceMetadataFetcher( num_attempts=2).retrieve_iam_role_credentials() - expected_result = { - 'access_key': self._creds['AccessKeyId'], - 'secret_key': self._creds['SecretAccessKey'], - 'token': self._creds['Token'], - 'expiry_time': self._creds['Expiration'], - 'role_name': self._role_name - } - self.assertEqual(result, expected_result) + self.assertEqual(result, self._expected_creds) def test_invalid_json_is_retried(self): + self.add_get_token_imds_response(token='token') self.add_get_role_name_imds_response() # Response for creds that has a 200 status code but is invalid JSON. # This should be retried. @@ -1970,22 +2305,17 @@ def test_invalid_json_is_retried(self): self.add_get_credentials_imds_response() result = InstanceMetadataFetcher( num_attempts=2).retrieve_iam_role_credentials() - expected_result = { - 'access_key': self._creds['AccessKeyId'], - 'secret_key': self._creds['SecretAccessKey'], - 'token': self._creds['Token'], - 'expiry_time': self._creds['Expiration'], - 'role_name': self._role_name - } - self.assertEqual(result, expected_result) + self.assertEqual(result, self._expected_creds) def test_exhaust_retries_on_role_name_request(self): + self.add_get_token_imds_response(token='token') self.add_imds_response(status_code=400, body=b'') result = InstanceMetadataFetcher( num_attempts=1).retrieve_iam_role_credentials() self.assertEqual(result, {}) def test_exhaust_retries_on_credentials_request(self): + self.add_get_token_imds_response(token='token') self.add_get_role_name_imds_response() self.add_imds_response(status_code=400, body=b'') result = InstanceMetadataFetcher( @@ -1993,6 +2323,7 @@ def 
test_exhaust_retries_on_credentials_request(self): self.assertEqual(result, {}) def test_missing_fields_in_credentials_response(self): + self.add_get_token_imds_response(token='token') self.add_get_role_name_imds_response() # Response for creds that has a 200 status code and a JSON body # representing an error. We do not necessarily want to retry this. @@ -2000,3 +2331,90 @@ def test_missing_fields_in_credentials_response(self): body=b'{"Code":"AssumeRoleUnauthorizedAccess","Message":"error"}') result = InstanceMetadataFetcher().retrieve_iam_role_credentials() self.assertEqual(result, {}) + + def test_token_is_included(self): + user_agent = 'my-user-agent' + self.add_get_token_imds_response(token='token') + self.add_get_role_name_imds_response() + self.add_get_credentials_imds_response() + + result = InstanceMetadataFetcher( + user_agent=user_agent).retrieve_iam_role_credentials() + + # Check that subsequent calls after getting the token include the token. + self.assertEqual(self._send.call_count, 3) + for call in self._send.call_args_list[1:]: + self.assertEqual(call[0][0].headers['x-aws-ec2-metadata-token'], 'token') + self.assertEqual(result, self._expected_creds) + + def test_metadata_token_not_supported_404(self): + user_agent = 'my-user-agent' + self.add_imds_response(b'', status_code=404) + self.add_get_role_name_imds_response() + self.add_get_credentials_imds_response() + + result = InstanceMetadataFetcher( + user_agent=user_agent).retrieve_iam_role_credentials() + + for call in self._send.call_args_list[1:]: + self.assertNotIn('x-aws-ec2-metadata-token', call[0][0].headers) + self.assertEqual(result, self._expected_creds) + + def test_metadata_token_not_supported_403(self): + user_agent = 'my-user-agent' + self.add_imds_response(b'', status_code=403) + self.add_get_role_name_imds_response() + self.add_get_credentials_imds_response() + + result = InstanceMetadataFetcher( + user_agent=user_agent).retrieve_iam_role_credentials() + + for call in 
self._send.call_args_list[1:]: + self.assertNotIn('x-aws-ec2-metadata-token', call[0][0].headers) + self.assertEqual(result, self._expected_creds) + + def test_metadata_token_not_supported_405(self): + user_agent = 'my-user-agent' + self.add_imds_response(b'', status_code=405) + self.add_get_role_name_imds_response() + self.add_get_credentials_imds_response() + + result = InstanceMetadataFetcher( + user_agent=user_agent).retrieve_iam_role_credentials() + + for call in self._send.call_args_list[1:]: + self.assertNotIn('x-aws-ec2-metadata-token', call[0][0].headers) + self.assertEqual(result, self._expected_creds) + + def test_metadata_token_not_supported_timeout(self): + user_agent = 'my-user-agent' + self.add_imds_connection_error(ReadTimeoutError(endpoint_url='url')) + self.add_get_role_name_imds_response() + self.add_get_credentials_imds_response() + + result = InstanceMetadataFetcher( + user_agent=user_agent).retrieve_iam_role_credentials() + + for call in self._send.call_args_list[1:]: + self.assertNotIn('x-aws-ec2-metadata-token', call[0][0].headers) + self.assertEqual(result, self._expected_creds) + + def test_token_not_supported_exhaust_retries(self): + user_agent = 'my-user-agent' + self.add_imds_connection_error(ConnectTimeoutError(endpoint_url='url')) + self.add_get_role_name_imds_response() + self.add_get_credentials_imds_response() + + result = InstanceMetadataFetcher( + user_agent=user_agent).retrieve_iam_role_credentials() + + for call in self._send.call_args_list[1:]: + self.assertNotIn('x-aws-ec2-metadata-token', call[0][0].headers) + self.assertEqual(result, self._expected_creds) + + def test_metadata_token_bad_request_yields_no_credentials(self): + user_agent = 'my-user-agent' + self.add_imds_response(b'', status_code=400) + result = InstanceMetadataFetcher( + user_agent=user_agent).retrieve_iam_role_credentials() + self.assertEqual(result, {})