diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 6a9834d26c..d97dd7d65b 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -10098,4 +10098,3 @@ CHANGELOG * feature:``EMR``: Added support for adding EBS storage to EMR instances. * bugfix:pagination: Refactored pagination to handle non-string service tokens. * bugfix:credentials: Fix race condition in credential provider. - diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md index 826378380e..5b627cfa60 100644 --- a/CODE_OF_CONDUCT.md +++ b/CODE_OF_CONDUCT.md @@ -2,4 +2,3 @@ This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact opensource-codeofconduct@amazon.com with any additional questions or comments. - diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index 7dfd47830d..33bf64d35d 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -33,7 +33,7 @@ contributions as well: fixed upstream. * Changes to paginators, waiters, and endpoints are also generated upstream based on our internal knowledge of the AWS services. These include any of the following files in ``botocore/data/``: - + * ``_endpoints.json`` * ``*.paginators-1.json`` * ``*.waiters-2.json`` diff --git a/README.rst b/README.rst index 8787ae2a3f..b152c8908a 100644 --- a/README.rst +++ b/README.rst @@ -38,10 +38,10 @@ Assuming that you have Python and ``virtualenv`` installed, set up your environm .. code-block:: sh $ pip install botocore - + Using Botocore ~~~~~~~~~~~~~~ -After installing botocore +After installing botocore Next, set up credentials (in e.g. ``~/.aws/credentials``): @@ -57,7 +57,7 @@ Then, set up a default region (in e.g. 
``~/.aws/config``): [default] region=us-east-1 - + Other credentials configuration method can be found `here `__ Then, from a Python interpreter: @@ -87,7 +87,7 @@ applicable for ``botocore``: Contributing ------------ -We value feedback and contributions from our community. Whether it's a bug report, new feature, correction, or additional documentation, we welcome your issues and pull requests. Please read through this `CONTRIBUTING `__ document before submitting any issues or pull requests to ensure we have all the necessary information to effectively respond to your contribution. +We value feedback and contributions from our community. Whether it's a bug report, new feature, correction, or additional documentation, we welcome your issues and pull requests. Please read through this `CONTRIBUTING `__ document before submitting any issues or pull requests to ensure we have all the necessary information to effectively respond to your contribution. Maintenance and Support for SDK Major Versions @@ -107,4 +107,3 @@ More Resources * `NOTICE `__ * `Changelog `__ * `License `__ - diff --git a/botocore/__init__.py b/botocore/__init__.py index 57e9576f20..840597f915 100644 --- a/botocore/__init__.py +++ b/botocore/__init__.py @@ -23,6 +23,7 @@ class NullHandler(logging.Handler): def emit(self, record): pass + # Configure default logger to do nothing log = logging.getLogger('botocore') log.addHandler(NullHandler()) diff --git a/botocore/args.py b/botocore/args.py index 3baaa559dc..56e27ddaa6 100644 --- a/botocore/args.py +++ b/botocore/args.py @@ -72,7 +72,7 @@ def get_client_args(self, service_model, region_name, is_secure, service_model, client_config, endpoint_bridge, region_name, endpoint_url, is_secure, scoped_config) - service_name = final_args['service_name'] + service_name = final_args['service_name'] # noqa parameter_validation = final_args['parameter_validation'] endpoint_config = final_args['endpoint_config'] protocol = final_args['protocol'] diff --git 
a/botocore/auth.py b/botocore/auth.py index 9f5db94636..6623cac0c4 100644 --- a/botocore/auth.py +++ b/botocore/auth.py @@ -18,18 +18,21 @@ from email.utils import formatdate from hashlib import sha1, sha256 import hmac -from io import BytesIO import logging from operator import itemgetter import time -from botocore.compat import( +from botocore.compat import ( encodebytes, ensure_unicode, HTTPHeaders, json, parse_qs, quote, - six, unquote, urlsplit, urlunsplit, HAS_CRT, MD5_AVAILABLE + six, unquote, urlsplit, urlunsplit, HAS_CRT ) from botocore.exceptions import NoCredentialsError from botocore.utils import normalize_url_path, percent_encode_sequence +# Imports for backwards compatibility +from botocore.compat import MD5_AVAILABLE # noqa + + logger = logging.getLogger(__name__) @@ -113,8 +116,9 @@ def calc_signature(self, request, params): if key == 'Signature': continue value = six.text_type(params[key]) - pairs.append(quote(key.encode('utf-8'), safe='') + '=' + - quote(value.encode('utf-8'), safe='-_~')) + quoted_key = quote(key.encode('utf-8'), safe='') + quoted_value = quote(value.encode('utf-8'), safe='-_~') + pairs.append(f'{quoted_key}={quoted_value}') qs = '&'.join(pairs) string_to_sign += qs logger.debug('String to sign: %s', string_to_sign) @@ -275,9 +279,10 @@ def _header_value(self, value): return ' '.join(value.split()) def signed_headers(self, headers_to_sign): - l = ['%s' % n.lower().strip() for n in set(headers_to_sign)] - l = sorted(l) - return ';'.join(l) + headers = sorted( + [n.lower().strip() for n in set(headers_to_sign)] + ) + return ';'.join(headers) def payload(self, request): if not self._should_sha256_sign_payload(request): @@ -387,11 +392,11 @@ def add_auth(self, request): self._inject_signature_to_request(request, signature) def _inject_signature_to_request(self, request, signature): - l = ['AWS4-HMAC-SHA256 Credential=%s' % self.scope(request)] + auth_str = ['AWS4-HMAC-SHA256 Credential=%s' % self.scope(request)] headers_to_sign = 
self.headers_to_sign(request) - l.append('SignedHeaders=%s' % self.signed_headers(headers_to_sign)) - l.append('Signature=%s' % signature) - request.headers['Authorization'] = ', '.join(l) + auth_str.append('SignedHeaders=%s' % self.signed_headers(headers_to_sign)) + auth_str.append('Signature=%s' % signature) + request.headers['Authorization'] = ', '.join(auth_str) return request def _modify_request_before_signing(self, request): diff --git a/botocore/awsrequest.py b/botocore/awsrequest.py index f47f0cc01e..daa77294f3 100644 --- a/botocore/awsrequest.py +++ b/botocore/awsrequest.py @@ -12,10 +12,8 @@ # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. import io -import sys import logging import functools -import socket import urllib3.util from urllib3.connection import VerifiedHTTPSConnection @@ -25,8 +23,10 @@ import botocore.utils from botocore.compat import six -from botocore.compat import HTTPHeaders, HTTPResponse, urlunsplit, urlsplit, \ - urlencode, MutableMapping +from botocore.compat import ( + HTTPHeaders, HTTPResponse, urlunsplit, urlsplit, + urlencode, MutableMapping +) from botocore.exceptions import UnseekableStreamError @@ -207,8 +207,10 @@ def _is_100_continue_status(self, maybe_status_line): parts = maybe_status_line.split(None, 2) # Check for HTTP/ 100 Continue\r\n return ( - len(parts) >= 3 and parts[0].startswith(b'HTTP/') and - parts[1] == b'100') + len(parts) >= 3 + and parts[0].startswith(b'HTTP/') + and parts[1] == b'100' + ) class AWSHTTPConnection(AWSConnection, HTTPConnection): @@ -400,7 +402,7 @@ def _determine_content_length(self, body): # Try asking the body for it's length try: return len(body) - except (AttributeError, TypeError) as e: + except (AttributeError, TypeError): pass # Try getting the length from a seekable stream diff --git a/botocore/client.py b/botocore/client.py index 9584658f86..b154344447 100644 --- a/botocore/client.py +++ 
b/botocore/client.py @@ -11,7 +11,6 @@ # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. import logging -import functools from botocore import waiter, xform_name from botocore.args import ClientArgsCreator @@ -20,9 +19,8 @@ from botocore.docs.docstring import ClientMethodDocstring from botocore.docs.docstring import PaginatorDocstring from botocore.exceptions import ( - ClientError, DataNotFoundError, OperationNotPageableError, - UnknownSignatureVersionError, InvalidEndpointDiscoveryConfigurationError, - UnknownFIPSEndpointError, + DataNotFoundError, OperationNotPageableError, UnknownSignatureVersionError, + InvalidEndpointDiscoveryConfigurationError, UnknownFIPSEndpointError, ) from botocore.hooks import first_non_none_response from botocore.model import ServiceModel @@ -32,11 +30,6 @@ S3ArnParamHandler, S3EndpointSetter, ensure_boolean, S3ControlArnParamHandler, S3ControlEndpointSetter, ) -from botocore.args import ClientArgsCreator -from botocore import UNSIGNED -# Keep this imported. There's pre-existing code that uses -# "from botocore.client import Config". -from botocore.config import Config from botocore.history import get_global_history_recorder from botocore.discovery import ( EndpointDiscoveryHandler, EndpointDiscoveryManager, @@ -45,6 +38,15 @@ from botocore.retries import standard from botocore.retries import adaptive +# Keep these imported. There's pre-existing code that uses: +# "from botocore.client import Config" +# "from botocore.client import ClientError" +# etc. 
+from botocore.config import Config # noqa +from botocore.exceptions import ClientError # noqa +from botocore.args import ClientArgsCreator # noqa +from botocore import UNSIGNED # noqa + logger = logging.getLogger(__name__) history_recorder = get_global_history_recorder() @@ -465,8 +467,9 @@ def _create_endpoint(self, resolved, service_name, region_name, else: # Use the sslCommonName over the hostname for Python 2.6 compat. hostname = resolved.get('sslCommonName', resolved.get('hostname')) - endpoint_url = self._make_url(hostname, is_secure, - resolved.get('protocols', [])) + endpoint_url = self._make_url( + hostname, is_secure, resolved.get('protocols', []) + ) signature_version = self._resolve_signature_version( service_name, resolved) signing_name = self._resolve_signing_name(service_name, resolved) diff --git a/botocore/configprovider.py b/botocore/configprovider.py index b390ea0cc8..6d6a906e27 100644 --- a/botocore/configprovider.py +++ b/botocore/configprovider.py @@ -88,7 +88,7 @@ # Note: These configurations are considered internal to botocore. # Do not use them until publicly documented. 'csm_enabled': ( - 'csm_enabled', 'AWS_CSM_ENABLED', False, utils.ensure_boolean), + 'csm_enabled', 'AWS_CSM_ENABLED', False, utils.ensure_boolean), 'csm_host': ('csm_host', 'AWS_CSM_HOST', '127.0.0.1', None), 'csm_port': ('csm_port', 'AWS_CSM_PORT', 31000, int), 'csm_client_id': ('csm_client_id', 'AWS_CSM_CLIENT_ID', '', None), @@ -145,6 +145,7 @@ 'proxy_use_forwarding_for_https', None, None, utils.normalize_boolean), } + def create_botocore_default_config_mapping(session): chain_builder = ConfigChainFactory(session=session) config_mapping = _create_config_chain_mapping( diff --git a/botocore/credentials.py b/botocore/credentials.py index 616f178f96..47a7b47161 100644 --- a/botocore/credentials.py +++ b/botocore/credentials.py @@ -529,7 +529,7 @@ def _protected_refresh(self, is_mandatory): # the self._refresh_lock. 
try: metadata = self._refresh_using() - except Exception as e: + except Exception: period_name = 'mandatory' if is_mandatory else 'advisory' logger.warning("Refreshing temporary credentials failed " "during %s refresh period.", @@ -1486,10 +1486,10 @@ def _get_role_config(self, profile_name): } if duration_seconds is not None: - try: - role_config['duration_seconds'] = int(duration_seconds) - except ValueError: - pass + try: + role_config['duration_seconds'] = int(duration_seconds) + except ValueError: + pass # Either the credential source or the source profile must be # specified, but not both. @@ -1857,7 +1857,6 @@ def _retrieve_or_fail(self): ) def _build_headers(self): - headers = {} auth_token = self._environ.get(self.ENV_VAR_AUTH_TOKEN) if auth_token is not None: return { diff --git a/botocore/crt/auth.py b/botocore/crt/auth.py index 1feb17db05..c22490744e 100644 --- a/botocore/crt/auth.py +++ b/botocore/crt/auth.py @@ -9,6 +9,7 @@ from botocore.utils import percent_encode_sequence from botocore.exceptions import NoCredentialsError + class CrtSigV4Auth(BaseSigner): REQUIRES_REGION = True _PRESIGNED_HEADERS_BLOCKLIST = [ @@ -73,7 +74,7 @@ def add_auth(self, request): signed_body_value=explicit_payload, signed_body_header_type=body_header, expiration_in_seconds=self._expiration_in_seconds, - ) + ) crt_request = self._crt_request_from_aws_request(request) future = awscrt.auth.aws_sign_request(crt_request, signing_config) future.result() @@ -256,7 +257,7 @@ def add_auth(self, request): signed_body_value=explicit_payload, signed_body_header_type=body_header, expiration_in_seconds=self._expiration_in_seconds, - ) + ) crt_request = self._crt_request_from_aws_request(request) future = awscrt.auth.aws_sign_request(crt_request, signing_config) future.result() @@ -374,6 +375,7 @@ def _should_add_content_sha256_header(self, explicit_payload): # Always add X-Amz-Content-SHA256 header return True + class CrtSigV4AsymQueryAuth(CrtSigV4AsymAuth): DEFAULT_EXPIRES = 3600 
_SIGNATURE_TYPE = awscrt.auth.AwsSignatureType.HTTP_REQUEST_QUERY_PARAMS diff --git a/botocore/docs/client.py b/botocore/docs/client.py index f136bcd2c1..b8c71daac0 100644 --- a/botocore/docs/client.py +++ b/botocore/docs/client.py @@ -10,8 +10,6 @@ # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. -import inspect - from botocore.docs.utils import get_official_service_name from botocore.docs.method import document_custom_method from botocore.docs.method import document_model_driven_method diff --git a/botocore/docs/method.py b/botocore/docs/method.py index dbadd94313..a314745285 100644 --- a/botocore/docs/method.py +++ b/botocore/docs/method.py @@ -180,9 +180,9 @@ def document_model_driven_method(section, method_name, operation_model, if operation_model.deprecated: method_intro_section.style.start_danger() method_intro_section.writeln( - 'This operation is deprecated and may not function as ' - 'expected. This operation should not be used going forward ' - 'and is only kept for the purpose of backwards compatiblity.') + 'This operation is deprecated and may not function as ' + 'expected. This operation should not be used going forward ' + 'and is only kept for the purpose of backwards compatiblity.') method_intro_section.style.end_danger() service_uid = operation_model.service_model.metadata.get('uid') if service_uid is not None: diff --git a/botocore/docs/service.py b/botocore/docs/service.py index 0d233e51d0..bfa343abcc 100644 --- a/botocore/docs/service.py +++ b/botocore/docs/service.py @@ -11,7 +11,6 @@ # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. 
from botocore.exceptions import DataNotFoundError -from botocore.docs.utils import get_official_service_name from botocore.docs.client import ClientDocumenter from botocore.docs.client import ClientExceptionsDocumenter from botocore.docs.waiter import WaiterDocumenter diff --git a/botocore/endpoint.py b/botocore/endpoint.py index 47485a6887..5ab370354b 100644 --- a/botocore/endpoint.py +++ b/botocore/endpoint.py @@ -150,8 +150,7 @@ def _send_request(self, request_dict, operation_model): 'ResponseMetadata' in success_response[1]: # We want to share num retries, not num attempts. total_retries = attempts - 1 - success_response[1]['ResponseMetadata']['RetryAttempts'] = \ - total_retries + success_response[1]['ResponseMetadata']['RetryAttempts'] = total_retries if exception is not None: raise exception else: diff --git a/botocore/eventstream.py b/botocore/eventstream.py index 91e88b6ccd..c41435643d 100644 --- a/botocore/eventstream.py +++ b/botocore/eventstream.py @@ -464,7 +464,7 @@ def _parse_prelude(self): prelude = MessagePrelude(*raw_prelude) self._validate_prelude(prelude) # The minus 4 removes the prelude crc from the bytes to be checked - _validate_checksum(prelude_bytes[:_PRELUDE_LENGTH-4], prelude.crc) + _validate_checksum(prelude_bytes[:_PRELUDE_LENGTH - 4], prelude.crc) return prelude def _parse_headers(self): @@ -484,7 +484,7 @@ def _parse_message_crc(self): def _parse_message_bytes(self): # The minus 4 includes the prelude crc to the bytes to be checked - message_bytes = self._data[_PRELUDE_LENGTH-4:self._prelude.payload_end] + message_bytes = self._data[_PRELUDE_LENGTH - 4:self._prelude.payload_end] return message_bytes def _validate_message_crc(self): diff --git a/botocore/exceptions.py b/botocore/exceptions.py index 62c39c5ca1..fa539fe4d9 100644 --- a/botocore/exceptions.py +++ b/botocore/exceptions.py @@ -78,6 +78,7 @@ class ApiVersionNotFoundError(BotoCoreError): class HTTPClientError(BotoCoreError): fmt = 'An HTTP Client raised an unhandled 
exception: {error}' + def __init__(self, request=None, response=None, **kwargs): self.request = request self.response = response @@ -97,8 +98,10 @@ class InvalidIMDSEndpointError(BotoCoreError): class InvalidIMDSEndpointModeError(BotoCoreError): - fmt = ('Invalid EC2 Instance Metadata endpoint mode: {mode}' - ' Valid endpoint modes (case-insensitive): {valid_modes}.') + fmt = ( + 'Invalid EC2 Instance Metadata endpoint mode: {mode}' + ' Valid endpoint modes (case-insensitive): {valid_modes}.' + ) class EndpointConnectionError(ConnectionError): diff --git a/botocore/handlers.py b/botocore/handlers.py index 3a5180ec4d..17b1df3a30 100644 --- a/botocore/handlers.py +++ b/botocore/handlers.py @@ -25,7 +25,7 @@ from botocore.compat import ( unquote, json, six, unquote_str, ensure_bytes, get_md5, - MD5_AVAILABLE, OrderedDict, urlsplit, urlunsplit, XMLParseError, + OrderedDict, urlsplit, urlunsplit, XMLParseError, ETree, ) from botocore.docs.utils import AutoPopulatedParam @@ -37,19 +37,22 @@ from botocore.exceptions import ParamValidationError from botocore.exceptions import AliasConflictParameterError from botocore.exceptions import UnsupportedTLSVersionWarning -from botocore.exceptions import MissingServiceIdError from botocore.utils import percent_encode, SAFE_CHARS from botocore.utils import switch_host_with_param -from botocore.utils import hyphenize_service_id from botocore.utils import conditionally_calculate_md5 from botocore.utils import is_global_accesspoint -from botocore import retryhandler from botocore import utils -from botocore import translate import botocore import botocore.auth +# Keep these imported. There's pre-existing code that uses them. 
+from botocore import retryhandler # noqa +from botocore import translate # noqa +from botocore.compat import MD5_AVAILABLE # noqa +from botocore.exceptions import MissingServiceIdError # noqa +from botocore.utils import hyphenize_service_id # noqa + logger = logging.getLogger(__name__) @@ -277,8 +280,10 @@ def _sse_md5(params, sse_member_prefix='SSECustomer'): def _needs_s3_sse_customization(params, sse_member_prefix): - return (params.get(sse_member_prefix + 'Key') is not None and - sse_member_prefix + 'KeyMD5' not in params) + return ( + params.get(sse_member_prefix + 'Key') is not None + and sse_member_prefix + 'KeyMD5' not in params + ) def disable_signing(**kwargs): @@ -557,7 +562,7 @@ def validate_ascii_metadata(params, **kwargs): try: key.encode('ascii') value.encode('ascii') - except UnicodeEncodeError as e: + except UnicodeEncodeError: error_msg = ( 'Non ascii characters found in S3 metadata ' 'for key "%s", value: "%s". \nS3 metadata can only ' @@ -764,8 +769,7 @@ def decode_list_object_versions(parsed, context, **kwargs): def _decode_list_object(top_level_keys, nested_keys, parsed, context): - if parsed.get('EncodingType') == 'url' and \ - context.get('encoding_type_auto_set'): + if parsed.get('EncodingType') == 'url' and context.get('encoding_type_auto_set'): # URL decode top-level keys in the response if present. 
for key in top_level_keys: if key in parsed: @@ -1057,8 +1061,7 @@ def remove_lex_v2_start_conversation(class_attributes, **kwargs): AutoPopulatedParam('SSECustomerKeyMD5').document_auto_populated_param), # S3 SSE Copy Source documentation modifications ('docs.*.s3.*.complete-section', - AutoPopulatedParam( - 'CopySourceSSECustomerKeyMD5').document_auto_populated_param), + AutoPopulatedParam('CopySourceSSECustomerKeyMD5').document_auto_populated_param), # Add base64 information to Lambda ('docs.*.lambda.UpdateFunctionCode.complete-section', document_base64_encoding('ZipFile')), @@ -1130,10 +1133,9 @@ def remove_lex_v2_start_conversation(class_attributes, **kwargs): ########### # SMS Voice - ########## + ########### ('docs.title.sms-voice', - DeprecatedServiceDocumenter( - 'pinpoint-sms-voice').inject_deprecation_notice), + DeprecatedServiceDocumenter('pinpoint-sms-voice').inject_deprecation_notice), ('before-call', inject_api_version_header_if_needed), ] diff --git a/botocore/hooks.py b/botocore/hooks.py index 85bda7e504..1776199f80 100644 --- a/botocore/hooks.py +++ b/botocore/hooks.py @@ -12,7 +12,7 @@ # language governing permissions and limitations under the License. 
import copy import logging -from collections import defaultdict, deque, namedtuple +from collections import deque, namedtuple from botocore.compat import accepts_kwargs, six from botocore.utils import EVENT_ALIASES @@ -428,9 +428,11 @@ def _alias_event_name(self, event_name): def _replace_subsection(self, sections, old_parts, new_part): for i in range(len(sections)): - if sections[i] == old_parts[0] and \ - sections[i:i+len(old_parts)] == old_parts: - sections[i:i+len(old_parts)] = [new_part] + if ( + sections[i] == old_parts[0] and + sections[i:i + len(old_parts)] == old_parts + ): + sections[i:i + len(old_parts)] = [new_part] return def __copy__(self): @@ -521,8 +523,9 @@ def _get_items(self, starting_node, key_parts, collected, starting_index): # will result in final_list = [3, 2, 1], which is # why we reverse the lists. node_list = current_node['values'] - complete_order = (node_list.first + node_list.middle + - node_list.last) + complete_order = ( + node_list.first + node_list.middle + node_list.last + ) collected.extendleft(reversed(complete_order)) if not index == key_parts_len: children = current_node['children'] diff --git a/botocore/httpsession.py b/botocore/httpsession.py index b145120169..d7d859d36e 100644 --- a/botocore/httpsession.py +++ b/botocore/httpsession.py @@ -5,7 +5,7 @@ from base64 import b64encode import sys -from urllib3 import PoolManager, ProxyManager, proxy_from_url, Timeout +from urllib3 import PoolManager, proxy_from_url, Timeout from urllib3.util.retry import Retry from urllib3.util.ssl_ import ( ssl, OP_NO_SSLv2, OP_NO_SSLv3, OP_NO_COMPRESSION, DEFAULT_CIPHERS, @@ -21,7 +21,6 @@ from urllib3.util.ssl_ import SSLContext import botocore.awsrequest -from botocore.vendored import six from botocore.vendored.six.moves.urllib_parse import unquote from botocore.compat import filter_ssl_warnings, urlparse from botocore.exceptions import ( @@ -178,18 +177,20 @@ class URLLib3Session(object): v2.7.0 implemented this themselves, later version 
urllib3 support this directly via a flag to urlopen so enabling it if needed should be trivial. """ - def __init__(self, - verify=True, - proxies=None, - timeout=None, - max_pool_connections=MAX_POOL_CONNECTIONS, - socket_options=None, - client_cert=None, - proxies_config=None, + def __init__( + self, + verify=True, + proxies=None, + timeout=None, + max_pool_connections=MAX_POOL_CONNECTIONS, + socket_options=None, + client_cert=None, + proxies_config=None, ): self._verify = verify - self._proxy_config = ProxyConfiguration(proxies=proxies, - proxies_settings=proxies_config) + self._proxy_config = ProxyConfiguration( + proxies=proxies, proxies_settings=proxies_config + ) self._pool_classes_by_scheme = { 'http': botocore.awsrequest.AWSHTTPConnectionPool, 'https': botocore.awsrequest.AWSHTTPSConnectionPool, diff --git a/botocore/model.py b/botocore/model.py index 57576181f6..b9641dfb0e 100644 --- a/botocore/model.py +++ b/botocore/model.py @@ -384,8 +384,10 @@ def endpoint_discovery_operation(self): def endpoint_discovery_required(self): for operation in self.operation_names: model = self.operation_model(operation) - if (model.endpoint_discovery is not None and - model.endpoint_discovery.get('required')): + if ( + model.endpoint_discovery is not None + and model.endpoint_discovery.get('required') + ): return True return False @@ -415,7 +417,6 @@ def __repr__(self): return '%s(%s)' % (self.__class__.__name__, self.service_name) - class OperationModel(object): def __init__(self, operation_model, service_model, name=None): """ @@ -522,9 +523,11 @@ def idempotent_members(self): if not input_shape: return [] - return [name for (name, shape) in input_shape.members.items() - if 'idempotencyToken' in shape.metadata and - shape.metadata['idempotencyToken']] + return [ + name for (name, shape) in input_shape.members.items() + if 'idempotencyToken' in shape.metadata + and shape.metadata['idempotencyToken'] + ] @CachedProperty def auth_type(self): diff --git a/botocore/paginate.py 
b/botocore/paginate.py index b08c7ed8b7..6dd39f2981 100644 --- a/botocore/paginate.py +++ b/botocore/paginate.py @@ -274,12 +274,14 @@ def __iter__(self): num_current_response = len(current_response) truncate_amount = 0 if self._max_items is not None: - truncate_amount = (total_items + num_current_response) \ - - self._max_items + truncate_amount = ( + total_items + num_current_response - self._max_items + ) if truncate_amount > 0: - self._truncate_response(parsed, primary_result_key, - truncate_amount, starting_truncation, - next_token) + self._truncate_response( + parsed, primary_result_key, truncate_amount, + starting_truncation, next_token + ) yield response break else: diff --git a/botocore/parsers.py b/botocore/parsers.py index 10b1716c58..66af9893e1 100644 --- a/botocore/parsers.py +++ b/botocore/parsers.py @@ -701,7 +701,7 @@ def _parse_body_as_json(self, body_contents): except ValueError: # if the body cannot be parsed, include # the literal string as the message - return { 'message': body } + return {'message': body} class BaseEventStreamParser(ResponseParser): @@ -998,7 +998,7 @@ def _do_error_parse(self, response, shape): # the error response from other sources like the HTTP status code. 
try: return self._parse_error_from_body(response) - except ResponseParserError as e: + except ResponseParserError: LOG.debug( 'Exception caught when parsing error response body:', exc_info=True) diff --git a/botocore/regions.py b/botocore/regions.py index 57721f2926..c2fa3adf79 100644 --- a/botocore/regions.py +++ b/botocore/regions.py @@ -129,8 +129,9 @@ def construct_endpoint(self, service_name, region_name=None, partition_name=None valid_partition = partition if valid_partition is not None: - result = self._endpoint_for_partition(valid_partition, service_name, - region_name, True) + result = self._endpoint_for_partition( + valid_partition, service_name, region_name, True + ) return result return None @@ -141,8 +142,9 @@ def construct_endpoint(self, service_name, region_name=None, partition_name=None if result: return result - def _endpoint_for_partition(self, partition, service_name, region_name, - force_partition=False): + def _endpoint_for_partition( + self, partition, service_name, region_name, force_partition=False + ): # Get the service from the partition, or an empty template. service_data = partition['services'].get( service_name, DEFAULT_SERVICE_DATA) @@ -187,7 +189,7 @@ def _resolve(self, partition, service_name, service_data, endpoint_name): self._merge_keys(partition.get('defaults', {}), result) hostname = result.get('hostname', DEFAULT_URI_TEMPLATE) result['hostname'] = self._expand_template( - partition, result['hostname'], service_name, endpoint_name) + partition, hostname, service_name, endpoint_name) if 'sslCommonName' in result: result['sslCommonName'] = self._expand_template( partition, result['sslCommonName'], service_name, diff --git a/botocore/response.py b/botocore/response.py index 0a6b326648..be32345c41 100644 --- a/botocore/response.py +++ b/botocore/response.py @@ -12,16 +12,18 @@ # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. 
-import sys import logging -from botocore import ScalarTypes -from botocore.hooks import first_non_none_response -from botocore.compat import json, set_socket_timeout, XMLParseError +from botocore.compat import set_socket_timeout from botocore.exceptions import IncompleteReadError, ReadTimeoutError from urllib3.exceptions import ReadTimeoutError as URLLib3ReadTimeoutError from botocore import parsers +# Keep these imported. There's pre-existing code that uses them. +from botocore import ScalarTypes # noqa +from botocore.compat import XMLParseError # noqa +from botocore.hooks import first_non_none_response # noqa + logger = logging.getLogger(__name__) diff --git a/botocore/retries/adaptive.py b/botocore/retries/adaptive.py index d85e2e4922..1870147a08 100644 --- a/botocore/retries/adaptive.py +++ b/botocore/retries/adaptive.py @@ -59,10 +59,8 @@ def on_receiving_response(self, **kwargs): timestamp = self._clock.current_time() with self._lock: if not self._throttling_detector.is_throttling_error(**kwargs): - throttling = False new_rate = self._rate_adjustor.success_received(timestamp) else: - throttling = True if not self._enabled: rate_to_use = measured_rate else: diff --git a/botocore/retries/base.py b/botocore/retries/base.py index eec9da9794..008e85e421 100644 --- a/botocore/retries/base.py +++ b/botocore/retries/base.py @@ -24,4 +24,4 @@ def is_retryable(self, context): :type context: RetryContext """ - raise NotImplementedError("is_retryable") \ No newline at end of file + raise NotImplementedError("is_retryable") diff --git a/botocore/retries/bucket.py b/botocore/retries/bucket.py index 338a4401a3..1ddc663bb1 100644 --- a/botocore/retries/bucket.py +++ b/botocore/retries/bucket.py @@ -51,8 +51,10 @@ def max_rate(self, value): self._max_capacity = 1 # If we're scaling down, we also can't have a capacity that's # more than our max_capacity. 
- self._current_capacity = min(self._current_capacity, - self._max_capacity) + self._current_capacity = min( + self._current_capacity, + self._max_capacity + ) self._new_fill_rate_condition.notify() @property diff --git a/botocore/retries/standard.py b/botocore/retries/standard.py index 8a1757418a..c522d06bc0 100644 --- a/botocore/retries/standard.py +++ b/botocore/retries/standard.py @@ -382,8 +382,7 @@ def detect_error_type(self, context): # Check if this error code matches the shape. This can # be either by name or by a modeled error code. error_code_to_check = ( - shape.metadata.get('error', {}).get('code') - or shape.name + shape.metadata.get('error', {}).get('code') or shape.name ) if error_code == error_code_to_check: if shape.metadata['retryable'].get('throttling'): @@ -430,8 +429,10 @@ def __init__(self, max_attempts=DEFAULT_MAX_ATTEMPTS): ]) def is_retryable(self, context): - return (self._max_attempts_checker.is_retryable(context) and - self._additional_checkers.is_retryable(context)) + return ( + self._max_attempts_checker.is_retryable(context) + and self._additional_checkers.is_retryable(context) + ) class OrRetryChecker(BaseRetryableChecker): diff --git a/botocore/retryhandler.py b/botocore/retryhandler.py index d7385b20ff..cef29a7999 100644 --- a/botocore/retryhandler.py +++ b/botocore/retryhandler.py @@ -140,7 +140,7 @@ def _create_single_response_checker(response): checker = CRC32Checker(header=response['crc32body']) else: # TODO: send a signal. 
- raise ValueError("Unknown retry policy: %s" % config) + raise ValueError("Unknown retry policy") return checker diff --git a/botocore/serialize.py b/botocore/serialize.py index a228a47bf8..6590ce4590 100644 --- a/botocore/serialize.py +++ b/botocore/serialize.py @@ -217,8 +217,9 @@ def serialize_to_request(self, parameters, operation_model): if host_prefix is not None: serialized['host_prefix'] = host_prefix - serialized = self._prepare_additional_traits(serialized, - operation_model) + serialized = self._prepare_additional_traits( + serialized, operation_model + ) return serialized def _serialize(self, serialized, value, shape, prefix=''): @@ -352,8 +353,9 @@ def serialize_to_request(self, parameters, operation_model): if host_prefix is not None: serialized['host_prefix'] = host_prefix - serialized = self._prepare_additional_traits(serialized, - operation_model) + serialized = self._prepare_additional_traits( + serialized, operation_model + ) return serialized def _serialize(self, serialized, value, shape, key=None): @@ -474,8 +476,9 @@ def serialize_to_request(self, parameters, operation_model): if host_prefix is not None: serialized['host_prefix'] = host_prefix - serialized = self._prepare_additional_traits(serialized, - operation_model) + serialized = self._prepare_additional_traits( + serialized, operation_model + ) return serialized def _render_uri_template(self, uri_template, params): @@ -542,15 +545,15 @@ def _partition_parameters(self, partitioned, param_name, if isinstance(param_value, dict): partitioned['query_string_kwargs'].update(param_value) elif isinstance(param_value, bool): - partitioned['query_string_kwargs'][ - key_name] = str(param_value).lower() + bool_str = str(param_value).lower() + partitioned['query_string_kwargs'][key_name] = bool_str elif member.type_name == 'timestamp': timestamp_format = member.serialization.get( 'timestampFormat', self.QUERY_STRING_TIMESTAMP_FORMAT) - partitioned['query_string_kwargs'][ - key_name] = 
self._convert_timestamp_to_str( - param_value, timestamp_format - ) + timestamp = self._convert_timestamp_to_str( + param_value, timestamp_format + ) + partitioned['query_string_kwargs'][key_name] = timestamp else: partitioned['query_string_kwargs'][key_name] = param_value elif location == 'header': diff --git a/botocore/session.py b/botocore/session.py index 29770a1136..95450e0056 100644 --- a/botocore/session.py +++ b/botocore/session.py @@ -20,7 +20,6 @@ import logging import os import platform -import re import socket import warnings @@ -49,7 +48,6 @@ from botocore import paginate from botocore import waiter from botocore import retryhandler, translate -from botocore import utils from botocore.utils import EVENT_ALIASES, validate_region_name from botocore.compat import MutableMapping, HAS_CRT @@ -155,7 +153,7 @@ def _create_credential_resolver(self): def _register_data_loader(self): self._components.lazy_register_component( 'data_loader', - lambda: create_loader(self.get_config_variable('data_path'))) + lambda: create_loader(self.get_config_variable('data_path'))) def _register_endpoint_resolver(self): def create_default_resolver(): diff --git a/botocore/signers.py b/botocore/signers.py index b055fea4bd..d50e81ea1e 100644 --- a/botocore/signers.py +++ b/botocore/signers.py @@ -22,7 +22,10 @@ from botocore.exceptions import UnknownSignatureVersionError from botocore.exceptions import UnknownClientMethodError from botocore.exceptions import UnsupportedSignatureVersionError -from botocore.utils import fix_s3_host, datetime2timestamp +from botocore.utils import datetime2timestamp + +# Keep these imported. There's pre-existing code that uses them. +from botocore.utils import fix_s3_host # noqa class RequestSigner(object): @@ -330,8 +333,9 @@ def generate_presigned_url(self, url, date_less_than=None, policy=None): :rtype: str :return: The signed URL. 
""" - if (date_less_than is not None and policy is not None or - date_less_than is None and policy is None): + both_args_supplied = date_less_than is not None and policy is not None + neither_arg_supplied = date_less_than is None and policy is None + if both_args_supplied or neither_arg_supplied: e = 'Need to provide either date_less_than or policy, but not both' raise ValueError(e) if date_less_than is not None: @@ -347,7 +351,7 @@ def generate_presigned_url(self, url, date_less_than=None, policy=None): params.extend([ 'Signature=%s' % self._url_b64encode(signature).decode('utf8'), 'Key-Pair-Id=%s' % self.key_id, - ]) + ]) return self._build_url(url, params) def _build_url(self, base_url, extra_params): diff --git a/botocore/stub.py b/botocore/stub.py index ca7536bb0f..f35ac1c407 100644 --- a/botocore/stub.py +++ b/botocore/stub.py @@ -35,6 +35,7 @@ def __ne__(self, other): def __repr__(self): return '' + ANY = _ANY() @@ -326,9 +327,10 @@ def _assert_expected_call_order(self, model, params): raise UnStubbedResponseError( operation_name=model.name, reason=( - 'Unexpected API Call: A call was made but no additional calls expected. ' - 'Either the API Call was not stubbed or it was called multiple times.' - ) + 'Unexpected API Call: A call was made but no additional ' + 'calls expected. Either the API Call was not stubbed or ' + 'it was called multiple times.' 
+ ) ) name = self._queue[0]['operation_name'] diff --git a/botocore/utils.py b/botocore/utils.py index 4b0250a56b..725c313fd4 100644 --- a/botocore/utils.py +++ b/botocore/utils.py @@ -23,7 +23,6 @@ import os import socket import cgi -import warnings import dateutil.parser from dateutil.tz import tzutc @@ -32,9 +31,9 @@ import botocore.awsrequest import botocore.httpsession from botocore.compat import ( - json, quote, zip_longest, urlsplit, urlunsplit, OrderedDict, - six, urlparse, get_tzinfo_options, get_md5, MD5_AVAILABLE, - HAS_CRT + json, quote, zip_longest, urlsplit, urlunsplit, OrderedDict, + six, urlparse, get_tzinfo_options, get_md5, MD5_AVAILABLE, + HAS_CRT ) from botocore.vendored.six.moves.urllib.request import getproxies, proxy_bypass from botocore.exceptions import ( @@ -208,7 +207,7 @@ def resolve_imds_endpoint_mode(session): raise InvalidIMDSEndpointModeError(**error_msg_kwargs) return lendpoint_mode elif session.get_config_variable('imds_use_ipv6'): - return 'ipv6' + return 'ipv6' return 'ipv4' @@ -221,10 +220,12 @@ def is_json_value_header(shape): :return: True if this type is a jsonvalue, False otherwise :rtype: Bool """ - return (hasattr(shape, 'serialization') and - shape.serialization.get('jsonvalue', False) and - shape.serialization.get('location') == 'header' and - shape.type_name == 'string') + return ( + hasattr(shape, 'serialization') and + shape.serialization.get('jsonvalue', False) and + shape.serialization.get('location') == 'header' and + shape.type_name == 'string' + ) def get_service_module_name(service_model): @@ -1020,6 +1021,7 @@ def is_valid_ipv6_endpoint_url(endpoint_url): netloc = urlparse(endpoint_url).netloc return IPV6_ADDRZ_RE.match(netloc) is not None + def is_valid_endpoint_url(endpoint_url): """Verify the endpoint_url is valid. 
@@ -1047,9 +1049,11 @@ def is_valid_endpoint_url(endpoint_url): re.IGNORECASE) return allowed.match(hostname) + def is_valid_uri(endpoint_url): return is_valid_endpoint_url(endpoint_url) or is_valid_ipv6_endpoint_url(endpoint_url) + def validate_region_name(region_name): """Provided region_name must be a valid host label.""" if region_name is None: @@ -1220,7 +1224,7 @@ def switch_host_s3_accelerate(request, operation_name, **kwargs): if operation_name in ['ListBuckets', 'CreateBucket', 'DeleteBucket']: return - _switch_hosts(request, endpoint, use_new_scheme=False) + _switch_hosts(request, endpoint, use_new_scheme=False) def switch_host_with_param(request, param_name): @@ -1937,7 +1941,6 @@ def _s3_addressing_handler(self): return fix_s3_host - class S3ControlEndpointSetter(object): _DEFAULT_PARTITION = 'aws' _DEFAULT_DNS_SUFFIX = 'amazonaws.com' @@ -1967,7 +1970,6 @@ def set_endpoint(self, request, **kwargs): self._add_headers_from_arn_details(request) elif self._use_endpoint_from_outpost_id(request): self._validate_outpost_redirection_valid(request) - outpost_id = request.context['outpost_id'] self._override_signing_name(request, 's3-outposts') new_netloc = self._construct_outpost_endpoint(self._region) self._update_request_netloc(request, new_netloc) diff --git a/botocore/validate.py b/botocore/validate.py index 91ac08da03..727b2f99f8 100644 --- a/botocore/validate.py +++ b/botocore/validate.py @@ -103,45 +103,68 @@ def _format_error(self, error): error_type, name, additional = error name = self._get_name(name) if error_type == 'missing required field': - return 'Missing required parameter in %s: "%s"' % ( - name, additional['required_name']) + return ( + 'Missing required parameter in %s: "%s"' % ( + name, additional['required_name'] + ) + ) elif error_type == 'unknown field': - return 'Unknown parameter in %s: "%s", must be one of: %s' % ( - name, additional['unknown_param'], - ', '.join(additional['valid_names'])) + return ( + 'Unknown parameter in %s: "%s", 
must be one of: %s' % ( + name, additional['unknown_param'], + ', '.join(additional['valid_names']) + ) + ) elif error_type == 'invalid type': - return 'Invalid type for parameter %s, value: %s, type: %s, ' \ - 'valid types: %s' % (name, additional['param'], - str(type(additional['param'])), - ', '.join(additional['valid_types'])) + return ( + 'Invalid type for parameter %s, value: %s, type: %s, ' + 'valid types: %s' % ( + name, additional['param'], + str(type(additional['param'])), + ', '.join(additional['valid_types']) + ) + ) elif error_type == 'invalid range': min_allowed = additional['min_allowed'] - return ('Invalid value for parameter %s, value: %s, ' - 'valid min value: %s' % (name, additional['param'], - min_allowed)) + return ( + 'Invalid value for parameter %s, value: %s, valid min value: ' + '%s' % (name, additional['param'], min_allowed) + ) elif error_type == 'invalid length': min_allowed = additional['min_allowed'] - return ('Invalid length for parameter %s, value: %s, ' - 'valid min length: %s' % (name, additional['param'], - min_allowed)) + return ( + 'Invalid length for parameter %s, value: %s, ' + 'valid min length: %s' % ( + name, additional['param'], min_allowed + ) + ) elif error_type == 'unable to encode to json': - return 'Invalid parameter %s must be json serializable: %s' \ - % (name, additional['type_error']) + return ( + 'Invalid parameter %s must be json serializable: %s' % ( + name, additional['type_error'] + ) + ) elif error_type == 'invalid type for document': - return 'Invalid type for document parameter %s, value: %s, type: %s, ' \ - 'valid types: %s' % (name, additional['param'], - str(type(additional['param'])), - ', '.join(additional['valid_types'])) + return ( + 'Invalid type for document parameter %s, value: %s, type: %s, ' + 'valid types: %s' % ( + name, + additional['param'], + str(type(additional['param'])), + ', '.join(additional['valid_types']) + ) + ) elif error_type == 'more than one input': - return 'Invalid number of 
parameters set for tagged union structure' \ - ' %s. Can only set one of the following keys: %s.' % ( - name, '. '.join(additional['members']) - ) + return ( + 'Invalid number of parameters set for tagged union structure ' + '%s. Can only set one of the following keys: ' + '%s.' % (name, '. '.join(additional['members'])) + ) elif error_type == 'empty input': - return 'Must set one of the following keys for tagged union' \ - 'structure %s: %s.' % ( - name, '. '.join(additional['members']) - ) + return ( + 'Must set one of the following keys for tagged union' + 'structure %s: %s.' % (name, '. '.join(additional['members'])) + ) def _get_name(self, name): if not name: diff --git a/botocore/waiter.py b/botocore/waiter.py index 6e8834a22d..edd1b60aac 100644 --- a/botocore/waiter.py +++ b/botocore/waiter.py @@ -345,8 +345,8 @@ def wait(self, **kwargs): return if current_state == 'failure': reason = 'Waiter encountered a terminal failure state: %s' % ( - acceptor.explanation - ) + acceptor.explanation + ) raise WaiterError( name=self.name, reason=reason, @@ -356,7 +356,7 @@ def wait(self, **kwargs): if last_matched_acceptor is None: reason = 'Max attempts exceeded' else: - reason = 'Max attempts exceeded. Previously accepted state: %s' %( + reason = 'Max attempts exceeded. 
Previously accepted state: %s' % ( acceptor.explanation ) raise WaiterError( diff --git a/scripts/ci/run-crt-tests b/scripts/ci/run-crt-tests index 63fa6f8179..9ff2f067a6 100755 --- a/scripts/ci/run-crt-tests +++ b/scripts/ci/run-crt-tests @@ -29,7 +29,7 @@ def run(command): try: - import awscrt + import awscrt # noqa except ImportError: print("MISSING DEPENDENCY: awscrt must be installed to run the crt tests.") sys.exit(1) diff --git a/scripts/new-change b/scripts/new-change index 58b01ac73c..07c805df50 100755 --- a/scripts/new-change +++ b/scripts/new-change @@ -135,7 +135,7 @@ def replace_issue_references(parsed, repo_name): '`%s `__' % ( match.group(), repo_name, number)) - new_description = re.sub('#\d+', linkify, description) + new_description = re.sub(r'#\d+', linkify, description) parsed['description'] = new_description diff --git a/setup.cfg b/setup.cfg index bf86581db5..fc9b6398b2 100644 --- a/setup.cfg +++ b/setup.cfg @@ -11,5 +11,4 @@ requires_dist = crt = awscrt==0.11.24 [flake8] -# We ignore E203, E501 for this project -ignore = E203,E501 +ignore = E203,E226,E501,E731,W503,W504 diff --git a/setup.py b/setup.py index 7b8033a839..3d62c4e360 100644 --- a/setup.py +++ b/setup.py @@ -2,7 +2,6 @@ import codecs import os.path import re -import sys from setuptools import setup, find_packages diff --git a/tests/__init__.py b/tests/__init__.py index d196757bc2..222e939f4e 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -348,6 +348,7 @@ def _urlparse(url): url = url.decode('utf8') return urlparse(url) + def assert_url_equal(url1, url2): parts1 = _urlparse(url1) parts2 = _urlparse(url2) diff --git a/tests/acceptance/features/smoke/cloudhsm/cloudhsm.feature b/tests/acceptance/features/smoke/cloudhsm/cloudhsm.feature index 7aec4ba4d4..7c2237b273 100644 --- a/tests/acceptance/features/smoke/cloudhsm/cloudhsm.feature +++ b/tests/acceptance/features/smoke/cloudhsm/cloudhsm.feature @@ -6,7 +6,7 @@ Feature: Amazon CloudHSM When I call the "ListHapgs" API Then 
the value at "HapgList" should be a list - Scenario: Handling errors + Scenario: Handling errors When I attempt to call the "DescribeHapg" API with: | HapgArn | bogus-arn | Then I expect the response error code to be "ValidationException" diff --git a/tests/acceptance/features/smoke/cloudsearch/cloudsearch.feature b/tests/acceptance/features/smoke/cloudsearch/cloudsearch.feature index 501f9df11d..0789cba907 100644 --- a/tests/acceptance/features/smoke/cloudsearch/cloudsearch.feature +++ b/tests/acceptance/features/smoke/cloudsearch/cloudsearch.feature @@ -6,7 +6,7 @@ Feature: Amazon CloudSearch When I call the "DescribeDomains" API Then the response should contain a "DomainStatusList" - Scenario: Handling errors + Scenario: Handling errors When I attempt to call the "DescribeIndexFields" API with: | DomainName | fakedomain | Then I expect the response error code to be "ResourceNotFound" diff --git a/tests/acceptance/features/smoke/cloudwatchlogs/cloudwatchlogs.feature b/tests/acceptance/features/smoke/cloudwatchlogs/cloudwatchlogs.feature index bfd4f143e8..57a6a2ccca 100644 --- a/tests/acceptance/features/smoke/cloudwatchlogs/cloudwatchlogs.feature +++ b/tests/acceptance/features/smoke/cloudwatchlogs/cloudwatchlogs.feature @@ -6,7 +6,7 @@ Feature: Amazon CloudWatch Logs When I call the "DescribeLogGroups" API Then the value at "logGroups" should be a list - Scenario: Handling errors + Scenario: Handling errors When I attempt to call the "GetLogEvents" API with: | logGroupName | fakegroup | | logStreamName | fakestream | diff --git a/tests/acceptance/features/smoke/directoryservice/directoryservice.feature b/tests/acceptance/features/smoke/directoryservice/directoryservice.feature index 52031f1568..90c2451187 100644 --- a/tests/acceptance/features/smoke/directoryservice/directoryservice.feature +++ b/tests/acceptance/features/smoke/directoryservice/directoryservice.feature @@ -15,4 +15,3 @@ Feature: AWS Directory Service | Password | | | Size | | Then I expect the 
response error code to be "ValidationException" - diff --git a/tests/functional/docs/test_s3.py b/tests/functional/docs/test_s3.py index a1d463c577..b285b5acdc 100644 --- a/tests/functional/docs/test_s3.py +++ b/tests/functional/docs/test_s3.py @@ -44,14 +44,14 @@ def test_hides_content_md5_when_impossible_to_provide(self): method_contents.decode('utf-8')) def test_copy_source_documented_as_union_type(self): - content = self.get_docstring_for_method('s3', 'copy_object') + content = self.get_docstring_for_method('s3', 'copy_object') dict_form = ( "{'Bucket': 'string', 'Key': 'string', 'VersionId': 'string'}") self.assert_contains_line( "CopySource='string' or %s" % dict_form, content) def test_copy_source_param_docs_also_modified(self): - content = self.get_docstring_for_method('s3', 'copy_object') + content = self.get_docstring_for_method('s3', 'copy_object') param_docs = self.get_parameter_document_block('CopySource', content) # We don't want to overspecify the test, so I've picked # an arbitrary line from the customized docs. diff --git a/tests/functional/test_apigateway.py b/tests/functional/test_apigateway.py index 52488a974e..e8718e6242 100644 --- a/tests/functional/test_apigateway.py +++ b/tests/functional/test_apigateway.py @@ -10,7 +10,7 @@ # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. 
-from tests import mock, BaseSessionTest, ClientHTTPStubber +from tests import BaseSessionTest, ClientHTTPStubber class TestApiGateway(BaseSessionTest): diff --git a/tests/functional/test_block_fips.py b/tests/functional/test_block_fips.py index 9a4b1dc2b3..502850ebf5 100644 --- a/tests/functional/test_block_fips.py +++ b/tests/functional/test_block_fips.py @@ -56,10 +56,10 @@ def test_blocks_unknown_fips_pseudo_regions_presign(self): region = 'us-weast-1-fips' client, _ = self._make_client('accessanalyzer', region) with self.assertRaises(UnknownFIPSEndpointError): - url = client.generate_presigned_url('list_analyzers', Params={}) + client.generate_presigned_url('list_analyzers', Params={}) def test_blocks_unknown_fips_pseudo_regions_presign_post(self): region = 'fips-us-gov-weast-1' client, _ = self._make_client('s3', region) with self.assertRaises(UnknownFIPSEndpointError): - post = client.generate_presigned_post('foo-bucket', 'foo-key') + client.generate_presigned_post('foo-bucket', 'foo-key') diff --git a/tests/functional/test_client.py b/tests/functional/test_client.py index 9bbb6d7d23..6698dc4ea5 100644 --- a/tests/functional/test_client.py +++ b/tests/functional/test_client.py @@ -2,6 +2,7 @@ import botocore + class TestCreateClients(unittest.TestCase): def setUp(self): self.session = botocore.session.get_session() diff --git a/tests/functional/test_client_class_names.py b/tests/functional/test_client_class_names.py index bb9ae0c1a9..073b816cc0 100644 --- a/tests/functional/test_client_class_names.py +++ b/tests/functional/test_client_class_names.py @@ -68,6 +68,7 @@ 'workspaces': 'WorkSpaces' } + @pytest.mark.parametrize("service_name", SERVICE_TO_CLASS_NAME) def test_client_has_correct_class_name(service_name): session = botocore.session.get_session() diff --git a/tests/functional/test_client_metadata.py b/tests/functional/test_client_metadata.py index a560b5a804..a156b9555e 100644 --- a/tests/functional/test_client_metadata.py +++ 
b/tests/functional/test_client_metadata.py @@ -14,6 +14,7 @@ import botocore.session + class TestClientMeta(unittest.TestCase): def setUp(self): self.session = botocore.session.get_session() diff --git a/tests/functional/test_cloudsearchdomain.py b/tests/functional/test_cloudsearchdomain.py index d39eb588d1..a39919b538 100644 --- a/tests/functional/test_cloudsearchdomain.py +++ b/tests/functional/test_cloudsearchdomain.py @@ -10,7 +10,7 @@ # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. -from tests import mock, BaseSessionTest, ClientHTTPStubber +from tests import BaseSessionTest, ClientHTTPStubber class TestCloudsearchdomain(BaseSessionTest): diff --git a/tests/functional/test_cognito_idp.py b/tests/functional/test_cognito_idp.py index 648253f624..fedbf2c938 100644 --- a/tests/functional/test_cognito_idp.py +++ b/tests/functional/test_cognito_idp.py @@ -80,6 +80,7 @@ }, } + @pytest.mark.parametrize("operation_name, parameters", OPERATION_PARAMS.items()) def test_unsigned_operations(operation_name, parameters): environ = { diff --git a/tests/functional/test_credentials.py b/tests/functional/test_credentials.py index dceca9d8fc..7fa6eb1f41 100644 --- a/tests/functional/test_credentials.py +++ b/tests/functional/test_credentials.py @@ -34,7 +34,6 @@ from botocore.credentials import DeferredRefreshableCredentials from botocore.credentials import create_credential_resolver from botocore.credentials import JSONFileCache -from botocore.credentials import SSOProvider from botocore.config import Config from botocore.session import Session from botocore.exceptions import InvalidConfigError, InfiniteLoopConfigError @@ -91,6 +90,7 @@ def test_has_no_race_conditions(self): advisory_refresh=1, mandatory_refresh=0 ) + def _run_in_thread(collected): for _ in range(4000): frozen = creds.get_frozen_credentials() @@ -116,6 
+116,7 @@ def test_no_race_for_immediate_advisory_expiration(self): advisory_refresh=1, mandatory_refresh=0 ) + def _run_in_thread(collected): for _ in range(100): frozen = creds.get_frozen_credentials() diff --git a/tests/functional/test_discovery.py b/tests/functional/test_discovery.py index eaae3464a8..abf8b3c5ae 100644 --- a/tests/functional/test_discovery.py +++ b/tests/functional/test_discovery.py @@ -10,12 +10,9 @@ # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. -import os - from tests import ClientHTTPStubber, temporary_file from tests.functional import FunctionalSessionTest -import botocore from botocore.config import Config from botocore.compat import json from botocore.discovery import EndpointDiscoveryRequired @@ -87,7 +84,6 @@ def test_endpoint_discovery_enabled(self): self.assert_endpoint_discovery_used(stubber, discovered_endpoint) def test_endpoint_discovery_with_invalid_endpoint(self): - discovered_endpoint = 'https://discovered.domain' response = { 'Error': { 'Code': 'InvalidEndpointException', diff --git a/tests/functional/test_docdb.py b/tests/functional/test_docdb.py index 8ca050db8e..1408b0a7b9 100644 --- a/tests/functional/test_docdb.py +++ b/tests/functional/test_docdb.py @@ -10,12 +10,7 @@ # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. 
-from contextlib import contextmanager - -import botocore.session from tests import BaseSessionTest, ClientHTTPStubber -from botocore.stub import Stubber -from tests import mock, unittest class TestDocDBPresignUrlInjection(BaseSessionTest): diff --git a/tests/functional/test_event_alias.py b/tests/functional/test_event_alias.py index e29c096388..91dab543cf 100644 --- a/tests/functional/test_event_alias.py +++ b/tests/functional/test_event_alias.py @@ -582,7 +582,6 @@ def _event_aliases(): for client_name in SERVICES.keys(): - endpoint_prefix = SERVICES[client_name].get('endpoint_prefix') service_id = SERVICES[client_name]['service_id'] yield client_name, service_id @@ -590,7 +589,6 @@ def _event_aliases(): def _event_aliases_with_endpoint_prefix(): for client_name in SERVICES.keys(): endpoint_prefix = SERVICES[client_name].get('endpoint_prefix') - service_id = SERVICES[client_name]['service_id'] if endpoint_prefix is not None: yield client_name, endpoint_prefix diff --git a/tests/functional/test_h2_required.py b/tests/functional/test_h2_required.py index a93a560d18..cc80e4df53 100644 --- a/tests/functional/test_h2_required.py +++ b/tests/functional/test_h2_required.py @@ -21,6 +21,7 @@ 'lexv2-runtime': ['StartConversation'], } + def _all_test_cases(): session = get_session() loader = session.get_component('data_loader') diff --git a/tests/functional/test_history.py b/tests/functional/test_history.py index 23bc794b81..8750765c18 100644 --- a/tests/functional/test_history.py +++ b/tests/functional/test_history.py @@ -1,6 +1,4 @@ -from contextlib import contextmanager - -from tests import mock, BaseSessionTest, ClientHTTPStubber +from tests import BaseSessionTest, ClientHTTPStubber from botocore.history import BaseHistoryHandler from botocore.history import get_global_history_recorder @@ -55,9 +53,9 @@ def test_does_record_api_call(self): event = api_call_events[0] event_type, payload, source = event self.assertEqual(payload, { - 'operation': u'ListBuckets', - 
'params': {}, - 'service': 's3' + 'operation': u'ListBuckets', + 'params': {}, + 'service': 's3' }) self.assertEqual(source, 'BOTOCORE') @@ -102,7 +100,9 @@ def test_does_record_http_response(self): event = http_response_events[0] event_type, payload, source = event - self.assertEqual(payload, { + self.assertEqual( + payload, + { 'status_code': 200, 'headers': {}, 'streaming': False, diff --git a/tests/functional/test_iot_data.py b/tests/functional/test_iot_data.py index e9eeb8a09c..cb3c2bc9ad 100644 --- a/tests/functional/test_iot_data.py +++ b/tests/functional/test_iot_data.py @@ -10,8 +10,7 @@ # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. -import sys -from tests import unittest, mock, BaseSessionTest +from tests import mock, BaseSessionTest from botocore.exceptions import UnsupportedTLSVersionWarning diff --git a/tests/functional/test_kinesis.py b/tests/functional/test_kinesis.py index 26317956f3..fc72e70a61 100644 --- a/tests/functional/test_kinesis.py +++ b/tests/functional/test_kinesis.py @@ -11,10 +11,9 @@ # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. 
import json -import time from base64 import b64decode from uuid import uuid4 -from tests import unittest, BaseSessionTest, ClientHTTPStubber +from tests import BaseSessionTest, ClientHTTPStubber class TestKinesisListStreams(BaseSessionTest): diff --git a/tests/functional/test_loaders.py b/tests/functional/test_loaders.py index 97b9ab9e8f..c8d89f4dd5 100644 --- a/tests/functional/test_loaders.py +++ b/tests/functional/test_loaders.py @@ -28,7 +28,7 @@ def create_file(self, f, contents, name): def test_can_override_session(self): with temporary_file('w') as f: - # We're going to override _retry.json in + # We're going to override _retry.json in # botocore/data by setting our own data directory. override_name = self.create_file( f, contents='{"foo": "bar"}', name='_retry.json') diff --git a/tests/functional/test_machinelearning.py b/tests/functional/test_machinelearning.py index 12f53ba520..d96fb4afb5 100644 --- a/tests/functional/test_machinelearning.py +++ b/tests/functional/test_machinelearning.py @@ -10,7 +10,7 @@ # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. -from tests import mock, BaseSessionTest, ClientHTTPStubber +from tests import BaseSessionTest, ClientHTTPStubber class TestMachineLearning(BaseSessionTest): diff --git a/tests/functional/test_model_backcompat.py b/tests/functional/test_model_backcompat.py index f4a3ab9e73..f3ecd8d4b1 100644 --- a/tests/functional/test_model_backcompat.py +++ b/tests/functional/test_model_backcompat.py @@ -10,8 +10,6 @@ # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. 
-import os - from botocore.session import Session from tests import ClientHTTPStubber from tests.functional import TEST_MODELS_DIR diff --git a/tests/functional/test_model_completeness.py b/tests/functional/test_model_completeness.py index 20c824f10d..83a96666e1 100644 --- a/tests/functional/test_model_completeness.py +++ b/tests/functional/test_model_completeness.py @@ -36,14 +36,14 @@ def test_paginators_and_waiters_are_not_lost_in_new_version( # there will be a successor existing in latest version. loader = Loader() try: - previous = loader.load_service_model( + loader.load_service_model( service_name, type_name, previous_version ) except DataNotFoundError: pass else: try: - latest = loader.load_service_model( + loader.load_service_model( service_name, type_name, latest_version ) except DataNotFoundError as e: diff --git a/tests/functional/test_modeled_exceptions.py b/tests/functional/test_modeled_exceptions.py index c4766139b4..0d16939da2 100644 --- a/tests/functional/test_modeled_exceptions.py +++ b/tests/functional/test_modeled_exceptions.py @@ -10,9 +10,7 @@ # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. -from contextlib import contextmanager - -from tests import unittest, BaseSessionTest, ClientHTTPStubber +from tests import BaseSessionTest, ClientHTTPStubber class TestModeledExceptions(BaseSessionTest): diff --git a/tests/functional/test_neptune.py b/tests/functional/test_neptune.py index e290adb4e2..5bd5fdbcc0 100644 --- a/tests/functional/test_neptune.py +++ b/tests/functional/test_neptune.py @@ -10,12 +10,7 @@ # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. 
-from contextlib import contextmanager - -import botocore.session from tests import BaseSessionTest, ClientHTTPStubber -from botocore.stub import Stubber -from tests import mock, unittest class TestNeptunePresignUrlInjection(BaseSessionTest): diff --git a/tests/functional/test_paginate.py b/tests/functional/test_paginate.py index 8203e97a50..f086dd80cd 100644 --- a/tests/functional/test_paginate.py +++ b/tests/functional/test_paginate.py @@ -77,8 +77,9 @@ def setUp(self): self.stubber = Stubber(self.client) self.stubber.activate() - def _setup_scaling_pagination(self, page_size=200, max_items=100, - total_items=600): + def _setup_scaling_pagination( + self, page_size=200, max_items=100, total_items=600 + ): """ Add to the stubber to test paginating describe_scaling_activities. diff --git a/tests/functional/test_paginator_config.py b/tests/functional/test_paginator_config.py index ad74298a5d..1054e616a6 100644 --- a/tests/functional/test_paginator_config.py +++ b/tests/functional/test_paginator_config.py @@ -147,6 +147,7 @@ def _pagination_configs(): service_model ) + @pytest.mark.parametrize( "operation_name, page_config, service_model", _pagination_configs() diff --git a/tests/functional/test_public_apis.py b/tests/functional/test_public_apis.py index 9632518438..8bf08ab646 100644 --- a/tests/functional/test_public_apis.py +++ b/tests/functional/test_public_apis.py @@ -17,7 +17,6 @@ from tests import mock from tests import ClientHTTPStubber from botocore.session import Session -from botocore.exceptions import NoCredentialsError from botocore import xform_name diff --git a/tests/functional/test_rds.py b/tests/functional/test_rds.py index 918499c867..8c2e57c5b6 100644 --- a/tests/functional/test_rds.py +++ b/tests/functional/test_rds.py @@ -10,10 +10,8 @@ # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. 
-from contextlib import contextmanager - import botocore.session -from tests import mock, BaseSessionTest, ClientHTTPStubber +from tests import BaseSessionTest, ClientHTTPStubber from botocore.stub import Stubber from tests import unittest @@ -36,9 +34,9 @@ def test_copy_snapshot(self): 'SourceRegion': 'us-east-1' } response_body = ( - b'' - b'' - b'' + b'' + b'' + b'' ) self.http_stubber.add_response(body=response_body) with self.http_stubber: diff --git a/tests/functional/test_route53.py b/tests/functional/test_route53.py index 2dcd63a7b9..91d4eeb3df 100644 --- a/tests/functional/test_route53.py +++ b/tests/functional/test_route53.py @@ -15,6 +15,7 @@ import botocore.session from botocore.stub import Stubber + class TestRoute53Pagination(unittest.TestCase): def setUp(self): self.session = botocore.session.get_session() @@ -36,7 +37,7 @@ def test_paginate_with_max_items_int(self): self.stubber.add_response(self.operation_name, self.response) paginator = self.client.get_paginator('list_hosted_zones') with self.stubber: - config={'PageSize': 1} + config = {'PageSize': 1} results = list(paginator.paginate(PaginationConfig=config)) self.assertTrue(len(results) >= 0) @@ -46,10 +47,11 @@ def test_paginate_with_max_items_str(self): self.stubber.add_response(self.operation_name, self.response) paginator = self.client.get_paginator('list_hosted_zones') with self.stubber: - config={'PageSize': '1'} + config = {'PageSize': '1'} results = list(paginator.paginate(PaginationConfig=config)) self.assertTrue(len(results) >= 0) + class TestRoute53EndpointResolution(BaseSessionTest): def create_stubbed_client(self, service_name, region_name, **kwargs): diff --git a/tests/functional/test_s3.py b/tests/functional/test_s3.py index e55796d452..b033c83dbf 100644 --- a/tests/functional/test_s3.py +++ b/tests/functional/test_s3.py @@ -11,6 +11,7 @@ # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. 
import base64 +import datetime import re import pytest @@ -22,9 +23,10 @@ import botocore.session from botocore.config import Config -from botocore.compat import datetime, urlsplit, parse_qs, get_md5 +from botocore.compat import urlsplit, parse_qs, get_md5 from botocore.exceptions import ( - ParamValidationError, ClientError, + ParamValidationError, + ClientError, UnsupportedS3ConfigurationError, UnsupportedS3AccesspointConfigurationError, InvalidS3UsEast1RegionalEndpointConfigError, @@ -39,68 +41,64 @@ class TestS3BucketValidation(unittest.TestCase): def test_invalid_bucket_name_raises_error(self): session = botocore.session.get_session() - s3 = session.create_client('s3') + s3 = session.create_client("s3") with self.assertRaises(ParamValidationError): - s3.put_object(Bucket='adfgasdfadfs/bucket/name', - Key='foo', Body=b'asdf') + s3.put_object(Bucket="adfgasdfadfs/bucket/name", Key="foo", Body=b"asdf") class BaseS3OperationTest(BaseSessionTest): def setUp(self): super(BaseS3OperationTest, self).setUp() - self.region = 'us-west-2' - self.client = self.session.create_client( - 's3', self.region) + self.region = "us-west-2" + self.client = self.session.create_client("s3", self.region) self.http_stubber = ClientHTTPStubber(self.client) class BaseS3ClientConfigurationTest(BaseSessionTest): _V4_AUTH_REGEX = re.compile( - r'AWS4-HMAC-SHA256 ' - r'Credential=\w+/\d+/' - r'(?P[a-z0-9-]+)/' - r'(?P[a-z0-9-]+)/' + r"AWS4-HMAC-SHA256 " + r"Credential=\w+/\d+/" + r"(?P[a-z0-9-]+)/" + r"(?P[a-z0-9-]+)/" ) def setUp(self): super(BaseS3ClientConfigurationTest, self).setUp() - self.region = 'us-west-2' + self.region = "us-west-2" def assert_signing_region(self, request, expected_region): - auth_header = request.headers['Authorization'].decode('utf-8') + auth_header = request.headers["Authorization"].decode("utf-8") actual_region = None match = self._V4_AUTH_REGEX.match(auth_header) if match: - actual_region = match.group('signing_region') + actual_region = 
match.group("signing_region") self.assertEqual(expected_region, actual_region) def assert_signing_name(self, request, expected_name): - auth_header = request.headers['Authorization'].decode('utf-8') + auth_header = request.headers["Authorization"].decode("utf-8") actual_name = None match = self._V4_AUTH_REGEX.match(auth_header) if match: - actual_name = match.group('signing_name') + actual_name = match.group("signing_name") self.assertEqual(expected_name, actual_name) def assert_signing_region_in_url(self, url, expected_region): qs_components = parse_qs(urlsplit(url).query) - self.assertIn(expected_region, qs_components['X-Amz-Credential'][0]) + self.assertIn(expected_region, qs_components["X-Amz-Credential"][0]) def assert_endpoint(self, request, expected_endpoint): actual_endpoint = urlsplit(request.url).netloc self.assertEqual(actual_endpoint, expected_endpoint) def create_s3_client(self, **kwargs): - client_kwargs = { - 'region_name': self.region - } + client_kwargs = {"region_name": self.region} client_kwargs.update(kwargs) - return self.session.create_client('s3', **client_kwargs) + return self.session.create_client("s3", **client_kwargs) def set_config_file(self, fileobj, contents): fileobj.write(contents) fileobj.flush() - self.environ['AWS_CONFIG_FILE'] = fileobj.name + self.environ["AWS_CONFIG_FILE"] = fileobj.name class TestS3ClientConfigResolution(BaseS3ClientConfigurationTest): @@ -109,356 +107,267 @@ def test_no_s3_config(self): self.assertIsNone(client.meta.config.s3) def test_client_s3_dualstack_handles_uppercase_true(self): - with temporary_file('w') as f: + with temporary_file("w") as f: self.set_config_file( - f, - '[default]\n' - 's3 = \n' - ' use_dualstack_endpoint = True' + f, "[default]\n" "s3 = \n" " use_dualstack_endpoint = True" ) client = self.create_s3_client() - self.assertEqual( - client.meta.config.s3['use_dualstack_endpoint'], True) + self.assertEqual(client.meta.config.s3["use_dualstack_endpoint"], True) def 
test_client_s3_dualstack_handles_lowercase_true(self): - with temporary_file('w') as f: + with temporary_file("w") as f: self.set_config_file( - f, - '[default]\n' - 's3 = \n' - ' use_dualstack_endpoint = true' + f, "[default]\n" "s3 = \n" " use_dualstack_endpoint = true" ) client = self.create_s3_client() - self.assertEqual( - client.meta.config.s3['use_dualstack_endpoint'], True) + self.assertEqual(client.meta.config.s3["use_dualstack_endpoint"], True) def test_client_s3_accelerate_handles_uppercase_true(self): - with temporary_file('w') as f: + with temporary_file("w") as f: self.set_config_file( - f, - '[default]\n' - 's3 = \n' - ' use_accelerate_endpoint = True' + f, "[default]\n" "s3 = \n" " use_accelerate_endpoint = True" ) client = self.create_s3_client() - self.assertEqual( - client.meta.config.s3['use_accelerate_endpoint'], True) + self.assertEqual(client.meta.config.s3["use_accelerate_endpoint"], True) def test_client_s3_accelerate_handles_lowercase_true(self): - with temporary_file('w') as f: + with temporary_file("w") as f: self.set_config_file( - f, - '[default]\n' - 's3 = \n' - ' use_accelerate_endpoint = true' + f, "[default]\n" "s3 = \n" " use_accelerate_endpoint = true" ) client = self.create_s3_client() - self.assertEqual( - client.meta.config.s3['use_accelerate_endpoint'], True) + self.assertEqual(client.meta.config.s3["use_accelerate_endpoint"], True) def test_client_payload_signing_enabled_handles_uppercase_true(self): - with temporary_file('w') as f: + with temporary_file("w") as f: self.set_config_file( - f, - '[default]\n' - 's3 = \n' - ' payload_signing_enabled = True' + f, "[default]\n" "s3 = \n" " payload_signing_enabled = True" ) client = self.create_s3_client() - self.assertEqual( - client.meta.config.s3['payload_signing_enabled'], True) + self.assertEqual(client.meta.config.s3["payload_signing_enabled"], True) def test_client_payload_signing_enabled_handles_lowercase_true(self): - with temporary_file('w') as f: + with 
temporary_file("w") as f: self.set_config_file( - f, - '[default]\n' - 's3 = \n' - ' payload_signing_enabled = true' + f, "[default]\n" "s3 = \n" " payload_signing_enabled = true" ) client = self.create_s3_client() - self.assertEqual( - client.meta.config.s3['payload_signing_enabled'], True) + self.assertEqual(client.meta.config.s3["payload_signing_enabled"], True) def test_includes_unmodeled_s3_config_vars(self): - with temporary_file('w') as f: + with temporary_file("w") as f: self.set_config_file( - f, - '[default]\n' - 's3 = \n' - ' unmodeled = unmodeled_val' + f, "[default]\n" "s3 = \n" " unmodeled = unmodeled_val" ) client = self.create_s3_client() - self.assertEqual( - client.meta.config.s3['unmodeled'], 'unmodeled_val') + self.assertEqual(client.meta.config.s3["unmodeled"], "unmodeled_val") def test_mixed_modeled_and_unmodeled_config_vars(self): - with temporary_file('w') as f: + with temporary_file("w") as f: self.set_config_file( f, - '[default]\n' - 's3 = \n' - ' payload_signing_enabled = true\n' - ' unmodeled = unmodeled_val' + "[default]\n" + "s3 = \n" + " payload_signing_enabled = true\n" + " unmodeled = unmodeled_val", ) client = self.create_s3_client() self.assertEqual( client.meta.config.s3, - { - 'payload_signing_enabled': True, - 'unmodeled': 'unmodeled_val' - } + {"payload_signing_enabled": True, "unmodeled": "unmodeled_val"}, ) def test_use_arn_region(self): - self.environ['AWS_S3_USE_ARN_REGION'] = 'true' + self.environ["AWS_S3_USE_ARN_REGION"] = "true" client = self.create_s3_client() self.assertEqual( client.meta.config.s3, { - 'use_arn_region': True, - } + "use_arn_region": True, + }, ) def test_use_arn_region_config_var(self): - with temporary_file('w') as f: - self.set_config_file( - f, - '[default]\n' - 's3_use_arn_region = true' - ) + with temporary_file("w") as f: + self.set_config_file(f, "[default]\n" "s3_use_arn_region = true") client = self.create_s3_client() self.assertEqual( client.meta.config.s3, { - 'use_arn_region': True, - } 
+ "use_arn_region": True, + }, ) def test_use_arn_region_nested_config_var(self): - with temporary_file('w') as f: - self.set_config_file( - f, - '[default]\n' - 's3 = \n' - ' use_arn_region = true' - ) + with temporary_file("w") as f: + self.set_config_file(f, "[default]\n" "s3 = \n" " use_arn_region = true") client = self.create_s3_client() self.assertEqual( client.meta.config.s3, { - 'use_arn_region': True, - } + "use_arn_region": True, + }, ) def test_use_arn_region_is_case_insensitive(self): - self.environ['AWS_S3_USE_ARN_REGION'] = 'True' + self.environ["AWS_S3_USE_ARN_REGION"] = "True" client = self.create_s3_client() self.assertEqual( client.meta.config.s3, { - 'use_arn_region': True, - } + "use_arn_region": True, + }, ) def test_use_arn_region_env_var_overrides_config_var(self): - self.environ['AWS_S3_USE_ARN_REGION'] = 'false' - with temporary_file('w') as f: - self.set_config_file( - f, - '[default]\n' - 's3 = \n' - ' use_arn_region = true' - ) + self.environ["AWS_S3_USE_ARN_REGION"] = "false" + with temporary_file("w") as f: + self.set_config_file(f, "[default]\n" "s3 = \n" " use_arn_region = true") client = self.create_s3_client() self.assertEqual( client.meta.config.s3, { - 'use_arn_region': False, - } + "use_arn_region": False, + }, ) def test_client_config_use_arn_region_overrides_env_var(self): - self.environ['AWS_S3_USE_ARN_REGION'] = 'true' - client = self.create_s3_client( - config=Config( - s3={'use_arn_region': False} - ) - ) + self.environ["AWS_S3_USE_ARN_REGION"] = "true" + client = self.create_s3_client(config=Config(s3={"use_arn_region": False})) self.assertEqual( client.meta.config.s3, { - 'use_arn_region': False, - } + "use_arn_region": False, + }, ) def test_client_config_use_arn_region_overrides_config_var(self): - with temporary_file('w') as f: - self.set_config_file( - f, - '[default]\n' - 's3 = \n' - ' use_arn_region = true' - ) - client = self.create_s3_client( - config=Config( - s3={'use_arn_region': False} - ) - ) + with 
temporary_file("w") as f: + self.set_config_file(f, "[default]\n" "s3 = \n" " use_arn_region = true") + client = self.create_s3_client(config=Config(s3={"use_arn_region": False})) self.assertEqual( client.meta.config.s3, { - 'use_arn_region': False, - } + "use_arn_region": False, + }, ) - def test_use_arn_region_is_case_insensitive(self): - self.environ['AWS_S3_USE_ARN_REGION'] = 'True' - client = self.create_s3_client() - self.assertEqual( - client.meta.config.s3, - { - 'use_arn_region': True, - } - ) - - def test_us_east_1_regional_env_var(self): - self.environ['AWS_S3_US_EAST_1_REGIONAL_ENDPOINT'] = 'regional' + self.environ["AWS_S3_US_EAST_1_REGIONAL_ENDPOINT"] = "regional" client = self.create_s3_client() self.assertEqual( client.meta.config.s3, { - 'us_east_1_regional_endpoint': 'regional', - } + "us_east_1_regional_endpoint": "regional", + }, ) def test_us_east_1_regional_config_var(self): - with temporary_file('w') as f: + with temporary_file("w") as f: self.set_config_file( - f, - '[default]\n' - 's3_us_east_1_regional_endpoint = regional' + f, "[default]\n" "s3_us_east_1_regional_endpoint = regional" ) client = self.create_s3_client() self.assertEqual( client.meta.config.s3, { - 'us_east_1_regional_endpoint': 'regional', - } + "us_east_1_regional_endpoint": "regional", + }, ) def test_us_east_1_regional_nested_config_var(self): - with temporary_file('w') as f: + with temporary_file("w") as f: self.set_config_file( - f, - '[default]\n' - 's3 = \n' - ' us_east_1_regional_endpoint = regional' + f, "[default]\n" "s3 = \n" " us_east_1_regional_endpoint = regional" ) client = self.create_s3_client() self.assertEqual( client.meta.config.s3, { - 'us_east_1_regional_endpoint': 'regional', - } + "us_east_1_regional_endpoint": "regional", + }, ) def test_us_east_1_regional_env_var_overrides_config_var(self): - self.environ['AWS_S3_US_EAST_1_REGIONAL_ENDPOINT'] = 'regional' - with temporary_file('w') as f: + self.environ["AWS_S3_US_EAST_1_REGIONAL_ENDPOINT"] = 
"regional" + with temporary_file("w") as f: self.set_config_file( - f, - '[default]\n' - 's3 = \n' - ' us_east_1_regional_endpoint = legacy' + f, "[default]\n" "s3 = \n" " us_east_1_regional_endpoint = legacy" ) client = self.create_s3_client() self.assertEqual( client.meta.config.s3, { - 'us_east_1_regional_endpoint': 'regional', - } + "us_east_1_regional_endpoint": "regional", + }, ) def test_client_config_us_east_1_regional_overrides_env_var(self): - self.environ['AWS_S3_US_EAST_1_REGIONAL_ENDPOINT'] = 'regional' + self.environ["AWS_S3_US_EAST_1_REGIONAL_ENDPOINT"] = "regional" client = self.create_s3_client( - config=Config( - s3={'us_east_1_regional_endpoint': 'legacy'} - ) + config=Config(s3={"us_east_1_regional_endpoint": "legacy"}) ) self.assertEqual( client.meta.config.s3, { - 'us_east_1_regional_endpoint': 'legacy', - } + "us_east_1_regional_endpoint": "legacy", + }, ) def test_client_config_us_east_1_regional_overrides_config_var(self): - with temporary_file('w') as f: + with temporary_file("w") as f: self.set_config_file( - f, - '[default]\n' - 's3 = \n' - ' us_east_1_regional_endpoint = legacy' + f, "[default]\n" "s3 = \n" " us_east_1_regional_endpoint = legacy" ) client = self.create_s3_client( - config=Config( - s3={'us_east_1_regional_endpoint': 'regional'} - ) + config=Config(s3={"us_east_1_regional_endpoint": "regional"}) ) self.assertEqual( client.meta.config.s3, { - 'us_east_1_regional_endpoint': 'regional', - } + "us_east_1_regional_endpoint": "regional", + }, ) def test_client_validates_us_east_1_regional(self): with self.assertRaises(InvalidS3UsEast1RegionalEndpointConfigError): self.create_s3_client( - config=Config( - s3={'us_east_1_regional_endpoint': 'not-valid'} - ) + config=Config(s3={"us_east_1_regional_endpoint": "not-valid"}) ) def test_client_region_defaults_to_us_east_1(self): client = self.create_s3_client(region_name=None) - self.assertEqual(client.meta.region_name, 'us-east-1') + self.assertEqual(client.meta.region_name, 
"us-east-1") def test_client_region_remains_us_east_1(self): - client = self.create_s3_client(region_name='us-east-1') - self.assertEqual(client.meta.region_name, 'us-east-1') + client = self.create_s3_client(region_name="us-east-1") + self.assertEqual(client.meta.region_name, "us-east-1") def test_client_region_remains_aws_global(self): - client = self.create_s3_client(region_name='aws-global') - self.assertEqual(client.meta.region_name, 'aws-global') + client = self.create_s3_client(region_name="aws-global") + self.assertEqual(client.meta.region_name, "aws-global") def test_client_region_defaults_to_aws_global_for_regional(self): - self.environ['AWS_S3_US_EAST_1_REGIONAL_ENDPOINT'] = 'regional' + self.environ["AWS_S3_US_EAST_1_REGIONAL_ENDPOINT"] = "regional" client = self.create_s3_client(region_name=None) - self.assertEqual(client.meta.region_name, 'aws-global') + self.assertEqual(client.meta.region_name, "aws-global") def test_client_region_remains_us_east_1_for_regional(self): - self.environ['AWS_S3_US_EAST_1_REGIONAL_ENDPOINT'] = 'regional' - client = self.create_s3_client(region_name='us-east-1') - self.assertEqual(client.meta.region_name, 'us-east-1') + self.environ["AWS_S3_US_EAST_1_REGIONAL_ENDPOINT"] = "regional" + client = self.create_s3_client(region_name="us-east-1") + self.assertEqual(client.meta.region_name, "us-east-1") def test_client_region_remains_aws_global_for_regional(self): - self.environ['AWS_S3_US_EAST_1_REGIONAL_ENDPOINT'] = 'regional' - client = self.create_s3_client(region_name='aws-global') - self.assertEqual(client.meta.region_name, 'aws-global') + self.environ["AWS_S3_US_EAST_1_REGIONAL_ENDPOINT"] = "regional" + client = self.create_s3_client(region_name="aws-global") + self.assertEqual(client.meta.region_name, "aws-global") class TestS3Copy(BaseS3OperationTest): - def create_s3_client(self, **kwargs): - client_kwargs = { - 'region_name': self.region - } + client_kwargs = {"region_name": self.region} client_kwargs.update(kwargs) - 
return self.session.create_client('s3', **client_kwargs) + return self.session.create_client("s3", **client_kwargs) def create_stubbed_s3_client(self, **kwargs): client = self.create_s3_client(**kwargs) @@ -468,43 +377,43 @@ def create_stubbed_s3_client(self, **kwargs): def test_s3_copy_object_with_empty_response(self): self.client, self.http_stubber = self.create_stubbed_s3_client( - region_name='us-east-1' + region_name="us-east-1" ) - empty_body = b'' + empty_body = b"" complete_body = ( b'\n\n' - b'' - b'2020-04-21T21:03:31.000Z' - b'"s0mEcH3cK5uM"' + b"2020-04-21T21:03:31.000Z" + b""s0mEcH3cK5uM"" ) self.http_stubber.add_response(status=200, body=empty_body) self.http_stubber.add_response(status=200, body=complete_body) response = self.client.copy_object( - Bucket='bucket', - CopySource='other-bucket/test.txt', - Key='test.txt', + Bucket="bucket", + CopySource="other-bucket/test.txt", + Key="test.txt", ) # Validate we retried and got second body self.assertEqual(len(self.http_stubber.requests), 2) - self.assertEqual(response['ResponseMetadata']['HTTPStatusCode'], 200) - self.assertTrue('CopyObjectResult' in response) + self.assertEqual(response["ResponseMetadata"]["HTTPStatusCode"], 200) + self.assertTrue("CopyObjectResult" in response) def test_s3_copy_object_with_incomplete_response(self): self.client, self.http_stubber = self.create_stubbed_s3_client( - region_name='us-east-1' + region_name="us-east-1" ) incomplete_body = b'\n\n\n' self.http_stubber.add_response(status=200, body=incomplete_body) with self.assertRaises(ResponseParserError): self.client.copy_object( - Bucket='bucket', - CopySource='other-bucket/test.txt', - Key='test.txt', + Bucket="bucket", + CopySource="other-bucket/test.txt", + Key="test.txt", ) @@ -519,17 +428,13 @@ def create_stubbed_s3_client(self, **kwargs): http_stubber.start() return client, http_stubber - def assert_expected_copy_source_header(self, - http_stubber, expected_copy_source): + def 
assert_expected_copy_source_header(self, http_stubber, expected_copy_source): request = self.http_stubber.requests[0] - self.assertIn('x-amz-copy-source', request.headers) - self.assertEqual( - request.headers['x-amz-copy-source'], expected_copy_source) + self.assertIn("x-amz-copy-source", request.headers) + self.assertEqual(request.headers["x-amz-copy-source"], expected_copy_source) def add_copy_object_response(self, http_stubber): - http_stubber.add_response( - body=b'' - ) + http_stubber.add_response(body=b"") def assert_endpoint(self, request, expected_endpoint): actual_endpoint = urlsplit(request.url).netloc @@ -539,248 +444,214 @@ def assert_header_matches(self, request, header_key, expected_value): self.assertEqual(request.headers.get(header_key), expected_value) def test_missing_account_id_in_arn(self): - accesspoint_arn = ( - 'arn:aws:s3:us-west-2::accesspoint:myendpoint' - ) + accesspoint_arn = "arn:aws:s3:us-west-2::accesspoint:myendpoint" with self.assertRaises(botocore.exceptions.ParamValidationError): self.client.list_objects(Bucket=accesspoint_arn) def test_missing_accesspoint_name_in_arn(self): - accesspoint_arn = ( - 'arn:aws:s3:us-west-2:123456789012:accesspoint' - ) + accesspoint_arn = "arn:aws:s3:us-west-2:123456789012:accesspoint" with self.assertRaises(botocore.exceptions.ParamValidationError): self.client.list_objects(Bucket=accesspoint_arn) def test_accesspoint_includes_asterisk(self): - accesspoint_arn = ( - 'arn:aws:s3:us-west-2:123456789012:accesspoint:*' - ) + accesspoint_arn = "arn:aws:s3:us-west-2:123456789012:accesspoint:*" with self.assertRaises(botocore.exceptions.ParamValidationError): self.client.list_objects(Bucket=accesspoint_arn) def test_accesspoint_arn_contains_subresources(self): accesspoint_arn = ( - 'arn:aws:s3:us-west-2:123456789012:accesspoint:myendpoint:object' + "arn:aws:s3:us-west-2:123456789012:accesspoint:myendpoint:object" ) with self.assertRaises(botocore.exceptions.ParamValidationError): 
self.client.list_objects(Bucket=accesspoint_arn) def test_accesspoint_arn_with_custom_endpoint(self): - accesspoint_arn = ( - 'arn:aws:s3:us-west-2:123456789012:accesspoint:myendpoint' - ) + accesspoint_arn = "arn:aws:s3:us-west-2:123456789012:accesspoint:myendpoint" self.client, http_stubber = self.create_stubbed_s3_client( - endpoint_url='https://custom.com') + endpoint_url="https://custom.com" + ) http_stubber.add_response() self.client.list_objects(Bucket=accesspoint_arn) - expected_endpoint = 'myendpoint-123456789012.custom.com' + expected_endpoint = "myendpoint-123456789012.custom.com" self.assert_endpoint(http_stubber.requests[0], expected_endpoint) def test_accesspoint_arn_with_custom_endpoint_and_dualstack(self): - accesspoint_arn = ( - 'arn:aws:s3:us-west-2:123456789012:accesspoint:myendpoint' - ) + accesspoint_arn = "arn:aws:s3:us-west-2:123456789012:accesspoint:myendpoint" self.client, http_stubber = self.create_stubbed_s3_client( - endpoint_url='https://custom.com', - config=Config(s3={'use_dualstack_endpoint': True})) + endpoint_url="https://custom.com", + config=Config(s3={"use_dualstack_endpoint": True}), + ) http_stubber.add_response() self.client.list_objects(Bucket=accesspoint_arn) - expected_endpoint = 'myendpoint-123456789012.custom.com' + expected_endpoint = "myendpoint-123456789012.custom.com" self.assert_endpoint(http_stubber.requests[0], expected_endpoint) def test_accesspoint_arn_with_s3_accelerate(self): - accesspoint_arn = ( - 'arn:aws:s3:us-west-2:123456789012:accesspoint:myendpoint' - ) + accesspoint_arn = "arn:aws:s3:us-west-2:123456789012:accesspoint:myendpoint" self.client, _ = self.create_stubbed_s3_client( - config=Config(s3={'use_accelerate_endpoint': True})) + config=Config(s3={"use_accelerate_endpoint": True}) + ) with self.assertRaises( - botocore.exceptions. 
- UnsupportedS3AccesspointConfigurationError): + botocore.exceptions.UnsupportedS3AccesspointConfigurationError + ): self.client.list_objects(Bucket=accesspoint_arn) def test_accesspoint_arn_cross_partition(self): - accesspoint_arn = ( - 'arn:aws:s3:us-west-2:123456789012:accesspoint:myendpoint' - ) - self.client, _ = self.create_stubbed_s3_client( - region_name='cn-north-1') + accesspoint_arn = "arn:aws:s3:us-west-2:123456789012:accesspoint:myendpoint" + self.client, _ = self.create_stubbed_s3_client(region_name="cn-north-1") with self.assertRaises( - botocore.exceptions. - UnsupportedS3AccesspointConfigurationError): + botocore.exceptions.UnsupportedS3AccesspointConfigurationError + ): self.client.list_objects(Bucket=accesspoint_arn) def test_accesspoint_arn_cross_partition_use_client_region(self): - accesspoint_arn = ( - 'arn:aws:s3:us-west-2:123456789012:accesspoint:myendpoint' - ) + accesspoint_arn = "arn:aws:s3:us-west-2:123456789012:accesspoint:myendpoint" self.client, _ = self.create_stubbed_s3_client( - region_name='cn-north-1', - config=Config(s3={'use_accelerate_endpoint': True}) + region_name="cn-north-1", + config=Config(s3={"use_accelerate_endpoint": True}), ) with self.assertRaises( - botocore.exceptions. 
- UnsupportedS3AccesspointConfigurationError): + botocore.exceptions.UnsupportedS3AccesspointConfigurationError + ): self.client.list_objects(Bucket=accesspoint_arn) def test_signs_with_arn_region(self): - accesspoint_arn = ( - 'arn:aws:s3:us-west-2:123456789012:accesspoint:myendpoint' - ) + accesspoint_arn = "arn:aws:s3:us-west-2:123456789012:accesspoint:myendpoint" self.client, self.http_stubber = self.create_stubbed_s3_client( - region_name='us-east-1') + region_name="us-east-1" + ) self.http_stubber.add_response() self.client.list_objects(Bucket=accesspoint_arn) - self.assert_signing_region(self.http_stubber.requests[0], 'us-west-2') + self.assert_signing_region(self.http_stubber.requests[0], "us-west-2") def test_signs_with_client_region_when_use_arn_region_false(self): - accesspoint_arn = ( - 'arn:aws:s3:us-west-2:123456789012:accesspoint:myendpoint' - ) + accesspoint_arn = "arn:aws:s3:us-west-2:123456789012:accesspoint:myendpoint" self.client, self.http_stubber = self.create_stubbed_s3_client( - region_name='us-east-1', - config=Config(s3={'use_arn_region': False}) + region_name="us-east-1", config=Config(s3={"use_arn_region": False}) ) self.http_stubber.add_response() self.client.list_objects(Bucket=accesspoint_arn) - self.assert_signing_region(self.http_stubber.requests[0], 'us-east-1') + self.assert_signing_region(self.http_stubber.requests[0], "us-east-1") def test_presign_signs_with_arn_region(self): - accesspoint_arn = ( - 'arn:aws:s3:us-west-2:123456789012:accesspoint:myendpoint' - ) + accesspoint_arn = "arn:aws:s3:us-west-2:123456789012:accesspoint:myendpoint" self.client, _ = self.create_stubbed_s3_client( - region_name='us-east-1', - config=Config(signature_version='s3v4') + region_name="us-east-1", config=Config(signature_version="s3v4") ) url = self.client.generate_presigned_url( - 'get_object', {'Bucket': accesspoint_arn, 'Key': 'mykey'}) - self.assert_signing_region_in_url(url, 'us-west-2') + "get_object", {"Bucket": accesspoint_arn, "Key": 
"mykey"} + ) + self.assert_signing_region_in_url(url, "us-west-2") def test_presign_signs_with_client_region_when_use_arn_region_false(self): - accesspoint_arn = ( - 'arn:aws:s3:us-west-2:123456789012:accesspoint:myendpoint' - ) + accesspoint_arn = "arn:aws:s3:us-west-2:123456789012:accesspoint:myendpoint" self.client, _ = self.create_stubbed_s3_client( - region_name='us-east-1', - config=Config( - signature_version='s3v4', s3={'use_arn_region': False} - ) + region_name="us-east-1", + config=Config(signature_version="s3v4", s3={"use_arn_region": False}), ) url = self.client.generate_presigned_url( - 'get_object', {'Bucket': accesspoint_arn, 'Key': 'mykey'}) - self.assert_signing_region_in_url(url, 'us-east-1') + "get_object", {"Bucket": accesspoint_arn, "Key": "mykey"} + ) + self.assert_signing_region_in_url(url, "us-east-1") def test_copy_source_str_with_accesspoint_arn(self): copy_source = ( - 'arn:aws:s3:us-west-2:123456789012:accesspoint:myendpoint/' - 'object/myprefix/myobject' + "arn:aws:s3:us-west-2:123456789012:accesspoint:myendpoint/" + "object/myprefix/myobject" ) self.client, self.http_stubber = self.create_stubbed_s3_client() self.add_copy_object_response(self.http_stubber) - self.client.copy_object( - Bucket='mybucket', Key='mykey', CopySource=copy_source - ) + self.client.copy_object(Bucket="mybucket", Key="mykey", CopySource=copy_source) self.assert_expected_copy_source_header( self.http_stubber, expected_copy_source=( - b'arn%3Aaws%3As3%3Aus-west-2%3A123456789012%3Aaccesspoint%3A' - b'myendpoint/object/myprefix/myobject' - ) + b"arn%3Aaws%3As3%3Aus-west-2%3A123456789012%3Aaccesspoint%3A" + b"myendpoint/object/myprefix/myobject" + ), ) def test_copy_source_str_with_accesspoint_arn_and_version_id(self): copy_source = ( - 'arn:aws:s3:us-west-2:123456789012:accesspoint:myendpoint/' - 'object/myprefix/myobject?versionId=myversionid' + "arn:aws:s3:us-west-2:123456789012:accesspoint:myendpoint/" + "object/myprefix/myobject?versionId=myversionid" ) 
self.client, self.http_stubber = self.create_stubbed_s3_client() self.add_copy_object_response(self.http_stubber) - self.client.copy_object( - Bucket='mybucket', Key='mykey', CopySource=copy_source - ) + self.client.copy_object(Bucket="mybucket", Key="mykey", CopySource=copy_source) self.assert_expected_copy_source_header( self.http_stubber, expected_copy_source=( - b'arn%3Aaws%3As3%3Aus-west-2%3A123456789012%3Aaccesspoint%3A' - b'myendpoint/object/myprefix/myobject?versionId=myversionid' - ) + b"arn%3Aaws%3As3%3Aus-west-2%3A123456789012%3Aaccesspoint%3A" + b"myendpoint/object/myprefix/myobject?versionId=myversionid" + ), ) def test_copy_source_dict_with_accesspoint_arn(self): copy_source = { - 'Bucket': - 'arn:aws:s3:us-west-2:123456789012:accesspoint:myendpoint', - 'Key': 'myprefix/myobject', + "Bucket": "arn:aws:s3:us-west-2:123456789012:accesspoint:myendpoint", + "Key": "myprefix/myobject", } self.client, self.http_stubber = self.create_stubbed_s3_client() self.add_copy_object_response(self.http_stubber) - self.client.copy_object( - Bucket='mybucket', Key='mykey', CopySource=copy_source - ) + self.client.copy_object(Bucket="mybucket", Key="mykey", CopySource=copy_source) self.assert_expected_copy_source_header( self.http_stubber, expected_copy_source=( - b'arn%3Aaws%3As3%3Aus-west-2%3A123456789012%3Aaccesspoint%3A' - b'myendpoint/object/myprefix/myobject' - ) + b"arn%3Aaws%3As3%3Aus-west-2%3A123456789012%3Aaccesspoint%3A" + b"myendpoint/object/myprefix/myobject" + ), ) def test_copy_source_dict_with_accesspoint_arn_and_version_id(self): copy_source = { - 'Bucket': - 'arn:aws:s3:us-west-2:123456789012:accesspoint:myendpoint', - 'Key': 'myprefix/myobject', - 'VersionId': 'myversionid' + "Bucket": "arn:aws:s3:us-west-2:123456789012:accesspoint:myendpoint", + "Key": "myprefix/myobject", + "VersionId": "myversionid", } self.client, self.http_stubber = self.create_stubbed_s3_client() self.add_copy_object_response(self.http_stubber) - self.client.copy_object( - 
Bucket='mybucket', Key='mykey', CopySource=copy_source - ) + self.client.copy_object(Bucket="mybucket", Key="mykey", CopySource=copy_source) self.assert_expected_copy_source_header( self.http_stubber, expected_copy_source=( - b'arn%3Aaws%3As3%3Aus-west-2%3A123456789012%3Aaccesspoint%3A' - b'myendpoint/object/myprefix/myobject?versionId=myversionid' - ) + b"arn%3Aaws%3As3%3Aus-west-2%3A123456789012%3Aaccesspoint%3A" + b"myendpoint/object/myprefix/myobject?versionId=myversionid" + ), ) def test_basic_outpost_arn(self): outpost_arn = ( - 'arn:aws:s3-outposts:us-west-2:123456789012:outpost:' - 'op-01234567890123456:accesspoint:myaccesspoint' + "arn:aws:s3-outposts:us-west-2:123456789012:outpost:" + "op-01234567890123456:accesspoint:myaccesspoint" ) self.client, self.http_stubber = self.create_stubbed_s3_client( - region_name='us-east-1') + region_name="us-east-1" + ) self.http_stubber.add_response() self.client.list_objects(Bucket=outpost_arn) request = self.http_stubber.requests[0] - self.assert_signing_name(request, 's3-outposts') - self.assert_signing_region(request, 'us-west-2') + self.assert_signing_name(request, "s3-outposts") + self.assert_signing_region(request, "us-west-2") expected_endpoint = ( - 'myaccesspoint-123456789012.op-01234567890123456.' - 's3-outposts.us-west-2.amazonaws.com' + "myaccesspoint-123456789012.op-01234567890123456." 
+ "s3-outposts.us-west-2.amazonaws.com" ) self.assert_endpoint(request, expected_endpoint) - def test_basic_outpost_arn(self): + def test_basic_outpost_arn_custom_endpoint(self): outpost_arn = ( - 'arn:aws:s3-outposts:us-west-2:123456789012:outpost:' - 'op-01234567890123456:accesspoint:myaccesspoint' + "arn:aws:s3-outposts:us-west-2:123456789012:outpost:" + "op-01234567890123456:accesspoint:myaccesspoint" ) self.client, self.http_stubber = self.create_stubbed_s3_client( - endpoint_url='https://custom.com', - region_name='us-east-1') + endpoint_url="https://custom.com", region_name="us-east-1" + ) self.http_stubber.add_response() self.client.list_objects(Bucket=outpost_arn) request = self.http_stubber.requests[0] - self.assert_signing_name(request, 's3-outposts') - self.assert_signing_region(request, 'us-west-2') - expected_endpoint = ( - 'myaccesspoint-123456789012.op-01234567890123456.custom.com' - ) + self.assert_signing_name(request, "s3-outposts") + self.assert_signing_region(request, "us-west-2") + expected_endpoint = "myaccesspoint-123456789012.op-01234567890123456.custom.com" self.assert_endpoint(request, expected_endpoint) def test_outpost_arn_presigned_url(self): @@ -861,105 +732,102 @@ def test_outpost_arn_presigned_url_cross_region_arn(self): def test_outpost_arn_with_s3_accelerate(self): outpost_arn = ( - 'arn:aws:s3-outposts:us-west-2:123456789012:outpost:' - 'op-01234567890123456:accesspoint:myaccesspoint' + "arn:aws:s3-outposts:us-west-2:123456789012:outpost:" + "op-01234567890123456:accesspoint:myaccesspoint" ) self.client, _ = self.create_stubbed_s3_client( - config=Config(s3={'use_accelerate_endpoint': True})) + config=Config(s3={"use_accelerate_endpoint": True}) + ) with self.assertRaises(UnsupportedS3AccesspointConfigurationError): self.client.list_objects(Bucket=outpost_arn) def test_outpost_arn_with_s3_dualstack(self): outpost_arn = ( - 'arn:aws:s3-outposts:us-west-2:123456789012:outpost:' - 'op-01234567890123456:accesspoint:myaccesspoint' + 
"arn:aws:s3-outposts:us-west-2:123456789012:outpost:" + "op-01234567890123456:accesspoint:myaccesspoint" ) self.client, _ = self.create_stubbed_s3_client( - config=Config(s3={'use_dualstack_endpoint': True})) + config=Config(s3={"use_dualstack_endpoint": True}) + ) with self.assertRaises(UnsupportedS3AccesspointConfigurationError): self.client.list_objects(Bucket=outpost_arn) def test_incorrect_outpost_format(self): - outpost_arn = ( - 'arn:aws:s3-outposts:us-west-2:123456789012:outpost' - ) + outpost_arn = "arn:aws:s3-outposts:us-west-2:123456789012:outpost" with self.assertRaises(botocore.exceptions.ParamValidationError): self.client.list_objects(Bucket=outpost_arn) def test_incorrect_outpost_no_accesspoint(self): outpost_arn = ( - 'arn:aws:s3-outposts:us-west-2:123456789012:outpost:' - 'op-01234567890123456' + "arn:aws:s3-outposts:us-west-2:123456789012:outpost:" "op-01234567890123456" ) with self.assertRaises(botocore.exceptions.ParamValidationError): self.client.list_objects(Bucket=outpost_arn) def test_incorrect_outpost_resource_format(self): - outpost_arn = ( - 'arn:aws:s3-outposts:us-west-2:123456789012:outpost:myaccesspoint' - ) + outpost_arn = "arn:aws:s3-outposts:us-west-2:123456789012:outpost:myaccesspoint" with self.assertRaises(botocore.exceptions.ParamValidationError): self.client.list_objects(Bucket=outpost_arn) def test_incorrect_outpost_sub_resources(self): outpost_arn = ( - 'arn:aws:s3-outposts:us-west-2:123456789012:outpost:' - 'op-01234567890123456:accesspoint:mybucket:object:foo' + "arn:aws:s3-outposts:us-west-2:123456789012:outpost:" + "op-01234567890123456:accesspoint:mybucket:object:foo" ) with self.assertRaises(botocore.exceptions.ParamValidationError): self.client.list_objects(Bucket=outpost_arn) def test_incorrect_outpost_invalid_character(self): outpost_arn = ( - 'arn:aws:s3-outposts:us-west-2:123456789012:outpost:' - 'op-0123456.890123456:accesspoint:myaccesspoint' + "arn:aws:s3-outposts:us-west-2:123456789012:outpost:" + 
"op-0123456.890123456:accesspoint:myaccesspoint" ) with self.assertRaises(botocore.exceptions.ParamValidationError): self.client.list_objects(Bucket=outpost_arn) def test_s3_object_lambda_arn_with_s3_dualstack(self): s3_object_lambda_arn = ( - 'arn:aws:s3-object-lambda:us-west-2:123456789012:' - 'accesspoint/myBanner' + "arn:aws:s3-object-lambda:us-west-2:123456789012:" "accesspoint/myBanner" ) self.client, _ = self.create_stubbed_s3_client( - config=Config(s3={'use_dualstack_endpoint': True})) + config=Config(s3={"use_dualstack_endpoint": True}) + ) with self.assertRaises(UnsupportedS3AccesspointConfigurationError): self.client.list_objects(Bucket=s3_object_lambda_arn) def test_s3_object_lambda_fips_raise_for_cross_region(self): s3_object_lambda_arn = ( - 'arn:aws-us-gov:s3-object-lambda:us-gov-east-1:123456789012:' - 'accesspoint/mybanner' + "arn:aws-us-gov:s3-object-lambda:us-gov-east-1:123456789012:" + "accesspoint/mybanner" ) self.client, _ = self.create_stubbed_s3_client( - region_name='fips-us-gov-west-1', - config=Config(s3={'use_arn_region': False}) + region_name="fips-us-gov-west-1", + config=Config(s3={"use_arn_region": False}), ) expected_exception = UnsupportedS3AccesspointConfigurationError - with self.assertRaisesRegex(expected_exception, - 'ARNs in another region are not allowed'): + with self.assertRaisesRegex( + expected_exception, "ARNs in another region are not allowed" + ): self.client.list_objects(Bucket=s3_object_lambda_arn) self.client, _ = self.create_stubbed_s3_client( - region_name='fips-us-gov-west-1', - config=Config(s3={'use_arn_region': True}) + region_name="fips-us-gov-west-1", config=Config(s3={"use_arn_region": True}) ) expected_exception = UnsupportedS3AccesspointConfigurationError with self.assertRaisesRegex( - expected_exception, 'does not allow for cross-region calls'): + expected_exception, "does not allow for cross-region calls" + ): self.client.list_objects(Bucket=s3_object_lambda_arn) def 
test_s3_object_lambda_with_global_regions(self): s3_object_lambda_arn = ( - 'arn:aws:s3-object-lambda:us-east-1:123456789012:' - 'accesspoint/mybanner' + "arn:aws:s3-object-lambda:us-east-1:123456789012:" "accesspoint/mybanner" ) expected_exception = UnsupportedS3AccesspointConfigurationError - expected_msg = 'a regional endpoint must be specified' - for region in ('aws-global', 's3-external-1'): + expected_msg = "a regional endpoint must be specified" + for region in ("aws-global", "s3-external-1"): self.client, _ = self.create_stubbed_s3_client( - region_name=region, config=Config(s3={'use_arn_region': False}) + region_name=region, config=Config(s3={"use_arn_region": False}) ) with self.assertRaisesRegex(expected_exception, expected_msg): self.client.list_objects(Bucket=s3_object_lambda_arn) @@ -968,211 +836,196 @@ def test_s3_object_lambda_arn_with_us_east_1(self): # test that us-east-1 region is not resolved # into s3 global endpoint s3_object_lambda_arn = ( - 'arn:aws:s3-object-lambda:us-east-1:123456789012:' - 'accesspoint/myBanner' + "arn:aws:s3-object-lambda:us-east-1:123456789012:" "accesspoint/myBanner" ) self.client, self.http_stubber = self.create_stubbed_s3_client( - region_name='us-east-1', - config=Config(s3={'use_arn_region': False}) + region_name="us-east-1", config=Config(s3={"use_arn_region": False}) ) self.http_stubber.add_response() self.client.list_objects(Bucket=s3_object_lambda_arn) request = self.http_stubber.requests[0] - self.assert_signing_name(request, 's3-object-lambda') - self.assert_signing_region(request, 'us-east-1') + self.assert_signing_name(request, "s3-object-lambda") + self.assert_signing_region(request, "us-east-1") expected_endpoint = ( - 'myBanner-123456789012.s3-object-lambda.us-east-1.amazonaws.com' + "myBanner-123456789012.s3-object-lambda.us-east-1.amazonaws.com" ) self.assert_endpoint(request, expected_endpoint) def test_basic_s3_object_lambda_arn(self): s3_object_lambda_arn = ( - 
'arn:aws:s3-object-lambda:us-west-2:123456789012:' - 'accesspoint/myBanner' + "arn:aws:s3-object-lambda:us-west-2:123456789012:" "accesspoint/myBanner" ) self.client, self.http_stubber = self.create_stubbed_s3_client( - region_name='us-east-1') + region_name="us-east-1" + ) self.http_stubber.add_response() self.client.list_objects(Bucket=s3_object_lambda_arn) request = self.http_stubber.requests[0] - self.assert_signing_name(request, 's3-object-lambda') - self.assert_signing_region(request, 'us-west-2') + self.assert_signing_name(request, "s3-object-lambda") + self.assert_signing_region(request, "us-west-2") expected_endpoint = ( - 'myBanner-123456789012.s3-object-lambda.us-west-2.amazonaws.com' + "myBanner-123456789012.s3-object-lambda.us-west-2.amazonaws.com" ) self.assert_endpoint(request, expected_endpoint) def test_outposts_raise_exception_if_fips_region(self): outpost_arn = ( - 'arn:aws:s3-outposts:us-gov-east-1:123456789012:outpost:' - 'op-01234567890123456:accesspoint:myaccesspoint' + "arn:aws:s3-outposts:us-gov-east-1:123456789012:outpost:" + "op-01234567890123456:accesspoint:myaccesspoint" ) - self.client, _ = self.create_stubbed_s3_client( - region_name='us-gov-east-1-fips') + self.client, _ = self.create_stubbed_s3_client(region_name="us-gov-east-1-fips") expected_exception = UnsupportedS3AccesspointConfigurationError - with self.assertRaisesRegex(expected_exception, - 'outpost ARNs do not support FIPS'): + with self.assertRaisesRegex( + expected_exception, "outpost ARNs do not support FIPS" + ): self.client.list_objects(Bucket=outpost_arn) def test_accesspoint_fips_raise_for_cross_region(self): s3_accesspoint_arn = ( - 'arn:aws-us-gov:s3:us-gov-east-1:123456789012:' - 'accesspoint:myendpoint' + "arn:aws-us-gov:s3:us-gov-east-1:123456789012:" "accesspoint:myendpoint" ) self.client, _ = self.create_stubbed_s3_client( - region_name='fips-us-gov-west-1', - config=Config(s3={'use_arn_region': False}) + region_name="fips-us-gov-west-1", + 
config=Config(s3={"use_arn_region": False}), ) expected_exception = UnsupportedS3AccesspointConfigurationError - with self.assertRaisesRegex(expected_exception, - 'ARNs in another region are not allowed'): + with self.assertRaisesRegex( + expected_exception, "ARNs in another region are not allowed" + ): self.client.list_objects(Bucket=s3_accesspoint_arn) self.client, _ = self.create_stubbed_s3_client( - region_name='fips-us-gov-west-1', - config=Config(s3={'use_arn_region': True}) + region_name="fips-us-gov-west-1", config=Config(s3={"use_arn_region": True}) ) expected_exception = UnsupportedS3AccesspointConfigurationError with self.assertRaisesRegex( - expected_exception, 'does not allow for cross-region'): + expected_exception, "does not allow for cross-region" + ): self.client.list_objects(Bucket=s3_accesspoint_arn) def test_accesspoint_with_global_regions(self): - s3_accesspoint_arn = ( - 'arn:aws:s3:us-east-1:123456789012:accesspoint:myendpoint' - ) + s3_accesspoint_arn = "arn:aws:s3:us-east-1:123456789012:accesspoint:myendpoint" self.client, _ = self.create_stubbed_s3_client( - region_name='aws-global', - config=Config(s3={'use_arn_region': False}) + region_name="aws-global", config=Config(s3={"use_arn_region": False}) ) expected_exception = UnsupportedS3AccesspointConfigurationError - with self.assertRaisesRegex(expected_exception, - 'regional endpoint must be specified'): + with self.assertRaisesRegex( + expected_exception, "regional endpoint must be specified" + ): self.client.list_objects(Bucket=s3_accesspoint_arn) # It shouldn't raise if use_arn_region is True self.client, self.http_stubber = self.create_stubbed_s3_client( - region_name='s3-external-1', - config=Config(s3={'use_arn_region': True}) + region_name="s3-external-1", config=Config(s3={"use_arn_region": True}) ) self.http_stubber.add_response() self.client.list_objects(Bucket=s3_accesspoint_arn) request = self.http_stubber.requests[0] expected_endpoint = ( - 
'myendpoint-123456789012.s3-accesspoint.' - 'us-east-1.amazonaws.com' + "myendpoint-123456789012.s3-accesspoint." "us-east-1.amazonaws.com" ) self.assert_endpoint(request, expected_endpoint) # It shouldn't raise if no use_arn_region is specified since # use_arn_region defaults to True self.client, self.http_stubber = self.create_stubbed_s3_client( - region_name='s3-external-1', + region_name="s3-external-1", ) self.http_stubber.add_response() self.client.list_objects(Bucket=s3_accesspoint_arn) request = self.http_stubber.requests[0] expected_endpoint = ( - 'myendpoint-123456789012.s3-accesspoint.' - 'us-east-1.amazonaws.com' + "myendpoint-123456789012.s3-accesspoint." "us-east-1.amazonaws.com" ) self.assert_endpoint(request, expected_endpoint) @requires_crt() def test_mrap_arn_with_client_regions(self): - mrap_arn = 'arn:aws:s3::123456789012:accesspoint:mfzwi23gnjvgw.mrap' + mrap_arn = "arn:aws:s3::123456789012:accesspoint:mfzwi23gnjvgw.mrap" region_tests = [ - ('us-east-1', 'mfzwi23gnjvgw.mrap.accesspoint.s3-global.amazonaws.com'), - ('us-west-2', 'mfzwi23gnjvgw.mrap.accesspoint.s3-global.amazonaws.com'), - ('aws-global', 'mfzwi23gnjvgw.mrap.accesspoint.s3-global.amazonaws.com'), + ("us-east-1", "mfzwi23gnjvgw.mrap.accesspoint.s3-global.amazonaws.com"), + ("us-west-2", "mfzwi23gnjvgw.mrap.accesspoint.s3-global.amazonaws.com"), + ("aws-global", "mfzwi23gnjvgw.mrap.accesspoint.s3-global.amazonaws.com"), ] for region, expected in region_tests: self._assert_mrap_endpoint(mrap_arn, region, expected) @requires_crt() def test_mrap_arn_with_other_partition(self): - mrap_arn = 'arn:aws-cn:s3::123456789012:accesspoint:mfzwi23gnjvgw.mrap' - expected = 'mfzwi23gnjvgw.mrap.accesspoint.s3-global.amazonaws.com.cn' - self._assert_mrap_endpoint(mrap_arn, 'cn-north-1', expected) + mrap_arn = "arn:aws-cn:s3::123456789012:accesspoint:mfzwi23gnjvgw.mrap" + expected = "mfzwi23gnjvgw.mrap.accesspoint.s3-global.amazonaws.com.cn" + self._assert_mrap_endpoint(mrap_arn, "cn-north-1", 
expected) @requires_crt() def test_mrap_arn_with_invalid_s3_configs(self): - mrap_arn = 'arn:aws:s3::123456789012:accesspoint:mfzwi23gnjvgw.mrap' + mrap_arn = "arn:aws:s3::123456789012:accesspoint:mfzwi23gnjvgw.mrap" config_tests = [ - ( - 'us-west-2', - Config(s3={'use_dualstack_endpoint': True}) - ), - ( - 'us-west-2', - Config(s3={'use_accelerate_endpoint': True}) - ) + ("us-west-2", Config(s3={"use_dualstack_endpoint": True})), + ("us-west-2", Config(s3={"use_accelerate_endpoint": True})), ] for region, config in config_tests: self._assert_mrap_config_failure(mrap_arn, region, config=config) @requires_crt() def test_mrap_arn_with_custom_endpoint(self): - mrap_arn = 'arn:aws:s3::123456789012:accesspoint:mfzwi23gnjvgw.mrap' - endpoint_url = 'https://test.endpoint.amazonaws.com' - expected = 'mfzwi23gnjvgw.mrap.test.endpoint.amazonaws.com' + mrap_arn = "arn:aws:s3::123456789012:accesspoint:mfzwi23gnjvgw.mrap" + endpoint_url = "https://test.endpoint.amazonaws.com" + expected = "mfzwi23gnjvgw.mrap.test.endpoint.amazonaws.com" self._assert_mrap_endpoint( - mrap_arn, 'us-east-1', expected, endpoint_url=endpoint_url + mrap_arn, "us-east-1", expected, endpoint_url=endpoint_url ) @requires_crt() def test_mrap_arn_with_vpc_endpoint(self): - mrap_arn = 'arn:aws:s3::123456789012:accesspoint:mfzwi23gnjvgw.mrap' - endpoint_url = 'https://vpce-123-abc.vpce.s3-global.amazonaws.com' - expected = 'mfzwi23gnjvgw.mrap.vpce-123-abc.vpce.s3-global.amazonaws.com' + mrap_arn = "arn:aws:s3::123456789012:accesspoint:mfzwi23gnjvgw.mrap" + endpoint_url = "https://vpce-123-abc.vpce.s3-global.amazonaws.com" + expected = "mfzwi23gnjvgw.mrap.vpce-123-abc.vpce.s3-global.amazonaws.com" self._assert_mrap_endpoint( - mrap_arn, 'us-west-2', expected, endpoint_url=endpoint_url + mrap_arn, "us-west-2", expected, endpoint_url=endpoint_url ) @requires_crt() def test_mrap_arn_with_disable_config_enabled(self): - mrap_arn = 'arn:aws:s3::123456789012:accesspoint:mfzwi23gnjvgw.mrap' - config = 
Config(s3={'s3_disable_multiregion_access_points': True}) - for region in ('us-west-2', 'aws-global'): + mrap_arn = "arn:aws:s3::123456789012:accesspoint:mfzwi23gnjvgw.mrap" + config = Config(s3={"s3_disable_multiregion_access_points": True}) + for region in ("us-west-2", "aws-global"): self._assert_mrap_config_failure(mrap_arn, region, config) @requires_crt() def test_mrap_arn_with_disable_config_enabled_custom_endpoint(self): - mrap_arn = 'arn:aws:s3::123456789012:accesspoint:myendpoint' - config = Config(s3={'s3_disable_multiregion_access_points': True}) - self._assert_mrap_config_failure(mrap_arn, 'us-west-2', config) + mrap_arn = "arn:aws:s3::123456789012:accesspoint:myendpoint" + config = Config(s3={"s3_disable_multiregion_access_points": True}) + self._assert_mrap_config_failure(mrap_arn, "us-west-2", config) @requires_crt() def test_mrap_arn_with_disable_config_disabled(self): - mrap_arn = 'arn:aws:s3::123456789012:accesspoint:mfzwi23gnjvgw.mrap' - config = Config(s3={'s3_disable_multiregion_access_points': False}) - expected = 'mfzwi23gnjvgw.mrap.accesspoint.s3-global.amazonaws.com' - self._assert_mrap_endpoint(mrap_arn, 'us-west-2', expected, config=config) + mrap_arn = "arn:aws:s3::123456789012:accesspoint:mfzwi23gnjvgw.mrap" + config = Config(s3={"s3_disable_multiregion_access_points": False}) + expected = "mfzwi23gnjvgw.mrap.accesspoint.s3-global.amazonaws.com" + self._assert_mrap_endpoint(mrap_arn, "us-west-2", expected, config=config) @requires_crt() def test_global_arn_without_mrap_suffix(self): global_arn_tests = [ ( - 'arn:aws:s3::123456789012:accesspoint:myendpoint', - 'myendpoint.accesspoint.s3-global.amazonaws.com', + "arn:aws:s3::123456789012:accesspoint:myendpoint", + "myendpoint.accesspoint.s3-global.amazonaws.com", ), ( - 'arn:aws:s3::123456789012:accesspoint:my.bucket', - 'my.bucket.accesspoint.s3-global.amazonaws.com', + "arn:aws:s3::123456789012:accesspoint:my.bucket", + "my.bucket.accesspoint.s3-global.amazonaws.com", ), ] for arn, 
expected in global_arn_tests: - self._assert_mrap_endpoint(arn, 'us-west-2', expected) + self._assert_mrap_endpoint(arn, "us-west-2", expected) @requires_crt() def test_mrap_signing_algorithm_is_sigv4a(self): - s3_accesspoint_arn = ( - 'arn:aws:s3::123456789012:accesspoint:mfzwi23gnjvgw.mrap' - ) + s3_accesspoint_arn = "arn:aws:s3::123456789012:accesspoint:mfzwi23gnjvgw.mrap" self.client, self.http_stubber = self.create_stubbed_s3_client( - region_name='us-west-2' + region_name="us-west-2" ) self.http_stubber.add_response() self.client.list_objects(Bucket=s3_accesspoint_arn) @@ -1181,22 +1034,26 @@ def test_mrap_signing_algorithm_is_sigv4a(self): @requires_crt() def test_mrap_presigned_url(self): - mrap_arn = 'arn:aws:s3::123456789012:accesspoint:mfzwi23gnjvgw.mrap' - config = Config(s3={'s3_disable_multiregion_access_points': False}) - expected_url = 'mfzwi23gnjvgw.mrap.accesspoint.s3-global.amazonaws.com' - self._assert_mrap_presigned_url(mrap_arn, 'us-west-2', expected_url, config=config) + mrap_arn = "arn:aws:s3::123456789012:accesspoint:mfzwi23gnjvgw.mrap" + config = Config(s3={"s3_disable_multiregion_access_points": False}) + expected_url = "mfzwi23gnjvgw.mrap.accesspoint.s3-global.amazonaws.com" + self._assert_mrap_presigned_url( + mrap_arn, "us-west-2", expected_url, config=config + ) @requires_crt() def test_mrap_presigned_url_disabled(self): - mrap_arn = 'arn:aws:s3::123456789012:accesspoint:mfzwi23gnjvgw.mrap' - config = Config(s3={'s3_disable_multiregion_access_points': True}) - self._assert_mrap_config_presigned_failure(mrap_arn, 'us-west-2', config) + mrap_arn = "arn:aws:s3::123456789012:accesspoint:mfzwi23gnjvgw.mrap" + config = Config(s3={"s3_disable_multiregion_access_points": True}) + self._assert_mrap_config_presigned_failure(mrap_arn, "us-west-2", config) def _assert_mrap_config_failure(self, arn, region, config): self.client, self.http_stubber = self.create_stubbed_s3_client( - region_name=region, config=config) - with 
self.assertRaises(botocore.exceptions. - UnsupportedS3AccesspointConfigurationError): + region_name=region, config=config + ) + with self.assertRaises( + botocore.exceptions.UnsupportedS3AccesspointConfigurationError + ): self.client.list_objects(Bucket=arn) @FreezeTime(botocore.auth.datetime, date=DATE) @@ -1229,7 +1086,6 @@ def _assert_presigned_url( assert expected_signature == query_parts['X-Amz-Signature'] assert expected_credentials in query_parts['X-Amz-Credential'] - def _assert_mrap_presigned_url( self, arn, region, expected, endpoint_url=None, config=None ): @@ -1239,36 +1095,35 @@ def _assert_mrap_presigned_url( url_parts = urlsplit(presigned_url) self.assertEqual(expected, url_parts.hostname) # X-Amz-Region-Set header MUST be * (percent-encoded as %2A) for MRAPs - self.assertIn('X-Amz-Region-Set=%2A', url_parts.query) + self.assertIn("X-Amz-Region-Set=%2A", url_parts.query) def _assert_mrap_config_presigned_failure(self, arn, region, config): self.client, self.http_stubber = self.create_stubbed_s3_client( - region_name=region, config=config) - with self.assertRaises(botocore.exceptions. - UnsupportedS3AccesspointConfigurationError): + region_name=region, config=config + ) + with self.assertRaises( + botocore.exceptions.UnsupportedS3AccesspointConfigurationError + ): self.client.generate_presigned_url( - 'get_object', - Params={'Bucket': arn, 'Key': 'test_object'} + "get_object", Params={"Bucket": arn, "Key": "test_object"} ) def _assert_mrap_endpoint( self, arn, region, expected, endpoint_url=None, config=None ): self.client, self.http_stubber = self.create_stubbed_s3_client( - region_name=region, endpoint_url=endpoint_url, config=config) + region_name=region, endpoint_url=endpoint_url, config=config + ) self.http_stubber.add_response() self.client.list_objects(Bucket=arn) request = self.http_stubber.requests[0] self.assert_endpoint(request, expected) # MRAP requests MUST include a global signing region stored in the # X-Amz-Region-Set header as *. 
- self.assert_header_matches(request, 'X-Amz-Region-Set', b'*') + self.assert_header_matches(request, "X-Amz-Region-Set", b"*") def _assert_sigv4a_used(self, headers): - self.assertIn( - b'AWS4-ECDSA-P256-SHA256', headers.get('Authorization', '') - ) - + self.assertIn(b"AWS4-ECDSA-P256-SHA256", headers.get("Authorization", "")) class TestOnlyAsciiCharsAllowed(BaseS3OperationTest): @@ -1277,8 +1132,10 @@ def test_validates_non_ascii_chars_trigger_validation_error(self): with self.http_stubber: with self.assertRaises(ParamValidationError): self.client.put_object( - Bucket='foo', Key='bar', Metadata={ - 'goodkey': 'good', 'non-ascii': u'\u2713'}) + Bucket="foo", + Key="bar", + Metadata={"goodkey": "good", "non-ascii": u"\u2713"}, + ) class TestS3GetBucketLifecycle(BaseS3OperationTest): @@ -1287,47 +1144,47 @@ def test_multiple_transitions_returns_one(self): '' '' - ' ' - ' transitionRule' - ' foo' - ' Enabled' - ' ' - ' 40' - ' STANDARD_IA' - ' ' - ' ' - ' 70' - ' GLACIER' - ' ' - ' ' - ' ' - ' noncurrentVersionRule' - ' bar' - ' Enabled' - ' ' - ' 40' - ' STANDARD_IA' - ' ' - ' ' - ' 70' - ' GLACIER' - ' ' - ' ' - '' - ).encode('utf-8') - s3 = self.session.create_client('s3') + " " + " transitionRule" + " foo" + " Enabled" + " " + " 40" + " STANDARD_IA" + " " + " " + " 70" + " GLACIER" + " " + " " + " " + " noncurrentVersionRule" + " bar" + " Enabled" + " " + " 40" + " STANDARD_IA" + " " + " " + " 70" + " GLACIER" + " " + " " + "" + ).encode("utf-8") + s3 = self.session.create_client("s3") with ClientHTTPStubber(s3) as http_stubber: http_stubber.add_response(body=response_body) - response = s3.get_bucket_lifecycle(Bucket='mybucket') + response = s3.get_bucket_lifecycle(Bucket="mybucket") # Each Transition member should have at least one of the # transitions provided. 
self.assertEqual( - response['Rules'][0]['Transition'], - {'Days': 40, 'StorageClass': 'STANDARD_IA'} + response["Rules"][0]["Transition"], + {"Days": 40, "StorageClass": "STANDARD_IA"}, ) self.assertEqual( - response['Rules'][1]['NoncurrentVersionTransition'], - {'NoncurrentDays': 40, 'StorageClass': 'STANDARD_IA'} + response["Rules"][1]["NoncurrentVersionTransition"], + {"NoncurrentDays": 40, "StorageClass": "STANDARD_IA"}, ) @@ -1348,21 +1205,21 @@ def test_500_error_with_non_xml_body(self): # We are unsure of what exactly causes the response to be mangled # but we expect it to be how 100 continues are handled. non_xml_content = ( - 'x-amz-id-2: foo\r\n' - 'x-amz-request-id: bar\n' - 'Date: Tue, 06 Oct 2015 03:20:38 GMT\r\n' + "x-amz-id-2: foo\r\n" + "x-amz-request-id: bar\n" + "Date: Tue, 06 Oct 2015 03:20:38 GMT\r\n" 'ETag: "a6d856bc171fc6aa1b236680856094e2"\r\n' - 'Content-Length: 0\r\n' - 'Server: AmazonS3\r\n' - ).encode('utf-8') - s3 = self.session.create_client('s3') + "Content-Length: 0\r\n" + "Server: AmazonS3\r\n" + ).encode("utf-8") + s3 = self.session.create_client("s3") with ClientHTTPStubber(s3) as http_stubber: http_stubber.add_response(status=500, body=non_xml_content) http_stubber.add_response() - response = s3.put_object(Bucket='mybucket', Key='mykey', Body=b'foo') + response = s3.put_object(Bucket="mybucket", Key="mykey", Body=b"foo") # The first response should have been retried even though the xml is # invalid and eventually return the 200 response. 
- self.assertEqual(response['ResponseMetadata']['HTTPStatusCode'], 200) + self.assertEqual(response["ResponseMetadata"]["HTTPStatusCode"], 200) self.assertEqual(len(http_stubber.requests), 2) @@ -1374,48 +1231,49 @@ def create_stubbed_s3_client(self, **kwargs): return client, http_stubber def test_endpoint_redirection(self): - regions = ['us-west-2', 'us-east-1'] + regions = ["us-west-2", "us-east-1"] for region in regions: self.client, self.http_stubber = self.create_stubbed_s3_client( - region_name=region) + region_name=region + ) self.http_stubber.add_response() self.client.write_get_object_response( - RequestRoute='endpoint-io.a1c1d5c7', - RequestToken='SecretToken', + RequestRoute="endpoint-io.a1c1d5c7", + RequestToken="SecretToken", ) request = self.http_stubber.requests[0] - self.assert_signing_name(request, 's3-object-lambda') + self.assert_signing_name(request, "s3-object-lambda") self.assert_signing_region(request, region) expected_endpoint = ( - 'endpoint-io.a1c1d5c7.s3-object-lambda.' - '%s.amazonaws.com' % region + "endpoint-io.a1c1d5c7.s3-object-lambda." 
"%s.amazonaws.com" % region ) self.assert_endpoint(request, expected_endpoint) def test_endpoint_redirection_fails_with_custom_endpoint(self): self.client, self.http_stubber = self.create_stubbed_s3_client( - region_name='us-west-2', endpoint_url="https://example.com") + region_name="us-west-2", endpoint_url="https://example.com" + ) self.http_stubber.add_response() self.client.write_get_object_response( - RequestRoute='endpoint-io.a1c1d5c7', - RequestToken='SecretToken', + RequestRoute="endpoint-io.a1c1d5c7", + RequestToken="SecretToken", ) request = self.http_stubber.requests[0] - self.assert_signing_name(request, 's3-object-lambda') - self.assert_signing_region(request, 'us-west-2') - self.assert_endpoint(request, 'endpoint-io.a1c1d5c7.example.com') + self.assert_signing_name(request, "s3-object-lambda") + self.assert_signing_region(request, "us-west-2") + self.assert_endpoint(request, "endpoint-io.a1c1d5c7.example.com") def test_endpoint_redirection_fails_with_accelerate_endpoint(self): - config = Config(s3={'use_accelerate_endpoint': True}) + config = Config(s3={"use_accelerate_endpoint": True}) self.client, self.http_stubber = self.create_stubbed_s3_client( - region_name='us-west-2', + region_name="us-west-2", config=config, ) self.http_stubber.add_response() with self.assertRaises(UnsupportedS3ConfigurationError): self.client.write_get_object_response( - RequestRoute='endpoint-io.a1c1d5c7', - RequestToken='SecretToken', + RequestRoute="endpoint-io.a1c1d5c7", + RequestToken="SecretToken", ) @@ -1423,7 +1281,8 @@ class TestS3SigV4(BaseS3OperationTest): def setUp(self): super(TestS3SigV4, self).setUp() self.client = self.session.create_client( - 's3', self.region, config=Config(signature_version='s3v4')) + "s3", self.region, config=Config(signature_version="s3v4") + ) self.http_stubber = ClientHTTPStubber(self.client) self.http_stubber.add_response() @@ -1432,147 +1291,146 @@ def get_sent_headers(self): def test_content_md5_set(self): with self.http_stubber: - 
self.client.put_object(Bucket='foo', Key='bar', Body='baz') - self.assertIn('content-md5', self.get_sent_headers()) + self.client.put_object(Bucket="foo", Key="bar", Body="baz") + self.assertIn("content-md5", self.get_sent_headers()) def test_content_md5_set_empty_body(self): with self.http_stubber: - self.client.put_object(Bucket='foo', Key='bar', Body='') - self.assertIn('content-md5', self.get_sent_headers()) + self.client.put_object(Bucket="foo", Key="bar", Body="") + self.assertIn("content-md5", self.get_sent_headers()) def test_content_md5_set_empty_file(self): with self.http_stubber: - with temporary_file('rb') as f: - assert f.read() == b'' - self.client.put_object(Bucket='foo', Key='bar', Body=f) - self.assertIn('content-md5', self.get_sent_headers()) + with temporary_file("rb") as f: + assert f.read() == b"" + self.client.put_object(Bucket="foo", Key="bar", Body=f) + self.assertIn("content-md5", self.get_sent_headers()) def test_content_sha256_set_if_config_value_is_true(self): - config = Config(signature_version='s3v4', s3={ - 'payload_signing_enabled': True - }) - self.client = self.session.create_client( - 's3', self.region, config=config) + config = Config(signature_version="s3v4", s3={"payload_signing_enabled": True}) + self.client = self.session.create_client("s3", self.region, config=config) self.http_stubber = ClientHTTPStubber(self.client) self.http_stubber.add_response() with self.http_stubber: - self.client.put_object(Bucket='foo', Key='bar', Body='baz') + self.client.put_object(Bucket="foo", Key="bar", Body="baz") sent_headers = self.get_sent_headers() - sha_header = sent_headers.get('x-amz-content-sha256') - self.assertNotEqual(sha_header, b'UNSIGNED-PAYLOAD') + sha_header = sent_headers.get("x-amz-content-sha256") + self.assertNotEqual(sha_header, b"UNSIGNED-PAYLOAD") def test_content_sha256_not_set_if_config_value_is_false(self): - config = Config(signature_version='s3v4', s3={ - 'payload_signing_enabled': False - }) - self.client = 
self.session.create_client( - 's3', self.region, config=config) + config = Config(signature_version="s3v4", s3={"payload_signing_enabled": False}) + self.client = self.session.create_client("s3", self.region, config=config) self.http_stubber = ClientHTTPStubber(self.client) self.http_stubber.add_response() with self.http_stubber: - self.client.put_object(Bucket='foo', Key='bar', Body='baz') + self.client.put_object(Bucket="foo", Key="bar", Body="baz") sent_headers = self.get_sent_headers() - sha_header = sent_headers.get('x-amz-content-sha256') - self.assertEqual(sha_header, b'UNSIGNED-PAYLOAD') + sha_header = sent_headers.get("x-amz-content-sha256") + self.assertEqual(sha_header, b"UNSIGNED-PAYLOAD") def test_content_sha256_set_if_md5_is_unavailable(self): - with mock.patch('botocore.auth.MD5_AVAILABLE', False): - with mock.patch('botocore.utils.MD5_AVAILABLE', False): + with mock.patch("botocore.compat.MD5_AVAILABLE", False): + with mock.patch("botocore.utils.MD5_AVAILABLE", False): with self.http_stubber: - self.client.put_object(Bucket='foo', Key='bar', Body='baz') + self.client.put_object(Bucket="foo", Key="bar", Body="baz") sent_headers = self.get_sent_headers() - unsigned = 'UNSIGNED-PAYLOAD' - self.assertNotEqual(sent_headers['x-amz-content-sha256'], unsigned) - self.assertNotIn('content-md5', sent_headers) + unsigned = "UNSIGNED-PAYLOAD" + self.assertNotEqual(sent_headers["x-amz-content-sha256"], unsigned) + self.assertNotIn("content-md5", sent_headers) class TestCanSendIntegerHeaders(BaseSessionTest): - def test_int_values_with_sigv4(self): - s3 = self.session.create_client( - 's3', config=Config(signature_version='s3v4')) + s3 = self.session.create_client("s3", config=Config(signature_version="s3v4")) with ClientHTTPStubber(s3) as http_stubber: http_stubber.add_response() - s3.upload_part(Bucket='foo', Key='bar', Body=b'foo', - UploadId='bar', PartNumber=1, ContentLength=3) + s3.upload_part( + Bucket="foo", + Key="bar", + Body=b"foo", + UploadId="bar", + 
PartNumber=1, + ContentLength=3, + ) headers = http_stubber.requests[0].headers # Verify that the request integer value of 3 has been converted to # string '3'. This also means we've made it pass the signer which # expects string values in order to sign properly. - self.assertEqual(headers['Content-Length'], b'3') + self.assertEqual(headers["Content-Length"], b"3") class TestRegionRedirect(BaseS3OperationTest): def setUp(self): super(TestRegionRedirect, self).setUp() self.client = self.session.create_client( - 's3', 'us-west-2', config=Config( - signature_version='s3v4', - s3={'addressing_style': 'path'}, - )) + "s3", + "us-west-2", + config=Config( + signature_version="s3v4", + s3={"addressing_style": "path"}, + ), + ) self.http_stubber = ClientHTTPStubber(self.client) self.redirect_response = { - 'status': 301, - 'headers': {'x-amz-bucket-region': 'eu-central-1'}, - 'body': ( + "status": 301, + "headers": {"x-amz-bucket-region": "eu-central-1"}, + "body": ( b'\n' - b'' - b' PermanentRedirect' - b' The bucket you are attempting to access must be' - b' addressed using the specified endpoint. Please send ' - b' all future requests to this endpoint.' - b' ' - b' foo' - b' foo.s3.eu-central-1.amazonaws.com' - b'' - ) + b"" + b" PermanentRedirect" + b" The bucket you are attempting to access must be" + b" addressed using the specified endpoint. Please send " + b" all future requests to this endpoint." 
+ b" " + b" foo" + b" foo.s3.eu-central-1.amazonaws.com" + b"" + ), } self.bad_signing_region_response = { - 'status': 400, - 'headers': {'x-amz-bucket-region': 'eu-central-1'}, - 'body': ( + "status": 400, + "headers": {"x-amz-bucket-region": "eu-central-1"}, + "body": ( b'' - b'' - b' AuthorizationHeaderMalformed' - b' the region us-west-2 is wrong; ' - b'expecting eu-central-1' - b' eu-central-1' - b' BD9AA1730D454E39' - b' ' - b'' - ) + b"" + b" AuthorizationHeaderMalformed" + b" the region us-west-2 is wrong; " + b"expecting eu-central-1" + b" eu-central-1" + b" BD9AA1730D454E39" + b" " + b"" + ), } self.success_response = { - 'status': 200, - 'headers': {}, - 'body': ( + "status": 200, + "headers": {}, + "body": ( b'\n' - b'' - b' foo' - b' ' - b' ' - b' 1000' - b' url' - b' false' - b'' - ) + b"" + b" foo" + b" " + b" " + b" 1000" + b" url" + b" false" + b"" + ), } def test_region_redirect(self): self.http_stubber.add_response(**self.redirect_response) self.http_stubber.add_response(**self.success_response) with self.http_stubber: - response = self.client.list_objects(Bucket='foo') - self.assertEqual(response['ResponseMetadata']['HTTPStatusCode'], 200) + response = self.client.list_objects(Bucket="foo") + self.assertEqual(response["ResponseMetadata"]["HTTPStatusCode"], 200) self.assertEqual(len(self.http_stubber.requests), 2) - initial_url = ('https://s3.us-west-2.amazonaws.com/foo' - '?encoding-type=url') + initial_url = "https://s3.us-west-2.amazonaws.com/foo" "?encoding-type=url" self.assertEqual(self.http_stubber.requests[0].url, initial_url) - fixed_url = ('https://s3.eu-central-1.amazonaws.com/foo' - '?encoding-type=url') + fixed_url = "https://s3.eu-central-1.amazonaws.com/foo" "?encoding-type=url" self.assertEqual(self.http_stubber.requests[1].url, fixed_url) def test_region_redirect_cache(self): @@ -1581,21 +1439,17 @@ def test_region_redirect_cache(self): self.http_stubber.add_response(**self.success_response) with self.http_stubber: - 
first_response = self.client.list_objects(Bucket='foo') - second_response = self.client.list_objects(Bucket='foo') + first_response = self.client.list_objects(Bucket="foo") + second_response = self.client.list_objects(Bucket="foo") - self.assertEqual( - first_response['ResponseMetadata']['HTTPStatusCode'], 200) - self.assertEqual( - second_response['ResponseMetadata']['HTTPStatusCode'], 200) + self.assertEqual(first_response["ResponseMetadata"]["HTTPStatusCode"], 200) + self.assertEqual(second_response["ResponseMetadata"]["HTTPStatusCode"], 200) self.assertEqual(len(self.http_stubber.requests), 3) - initial_url = ('https://s3.us-west-2.amazonaws.com/foo' - '?encoding-type=url') + initial_url = "https://s3.us-west-2.amazonaws.com/foo" "?encoding-type=url" self.assertEqual(self.http_stubber.requests[0].url, initial_url) - fixed_url = ('https://s3.eu-central-1.amazonaws.com/foo' - '?encoding-type=url') + fixed_url = "https://s3.eu-central-1.amazonaws.com/foo" "?encoding-type=url" self.assertEqual(self.http_stubber.requests[1].url, fixed_url) self.assertEqual(self.http_stubber.requests[2].url, fixed_url) @@ -1603,72 +1457,68 @@ def test_resign_request_with_region_when_needed(self): # Create a client with no explicit configuration so we can # verify the default behavior. 
- client = self.session.create_client('s3', 'us-west-2') + client = self.session.create_client("s3", "us-west-2") with ClientHTTPStubber(client) as http_stubber: http_stubber.add_response(**self.bad_signing_region_response) http_stubber.add_response(**self.success_response) - first_response = client.list_objects(Bucket='foo') - self.assertEqual( - first_response['ResponseMetadata']['HTTPStatusCode'], 200) + first_response = client.list_objects(Bucket="foo") + self.assertEqual(first_response["ResponseMetadata"]["HTTPStatusCode"], 200) self.assertEqual(len(http_stubber.requests), 2) - initial_url = ('https://foo.s3.us-west-2.amazonaws.com/' - '?encoding-type=url') + initial_url = "https://foo.s3.us-west-2.amazonaws.com/" "?encoding-type=url" self.assertEqual(http_stubber.requests[0].url, initial_url) - fixed_url = ('https://foo.s3.eu-central-1.amazonaws.com/' - '?encoding-type=url') + fixed_url = ( + "https://foo.s3.eu-central-1.amazonaws.com/" "?encoding-type=url" + ) self.assertEqual(http_stubber.requests[1].url, fixed_url) def test_resign_request_in_us_east_1(self): - region_headers = {'x-amz-bucket-region': 'eu-central-1'} + region_headers = {"x-amz-bucket-region": "eu-central-1"} # Verify that the default behavior in us-east-1 will redirect - client = self.session.create_client('s3', 'us-east-1') + client = self.session.create_client("s3", "us-east-1") with ClientHTTPStubber(client) as http_stubber: http_stubber.add_response(status=400) http_stubber.add_response(status=400, headers=region_headers) http_stubber.add_response(headers=region_headers) http_stubber.add_response() - response = client.head_object(Bucket='foo', Key='bar') - self.assertEqual(response['ResponseMetadata']['HTTPStatusCode'], 200) + response = client.head_object(Bucket="foo", Key="bar") + self.assertEqual(response["ResponseMetadata"]["HTTPStatusCode"], 200) self.assertEqual(len(http_stubber.requests), 4) - initial_url = ('https://foo.s3.amazonaws.com/bar') + initial_url = 
"https://foo.s3.amazonaws.com/bar" self.assertEqual(http_stubber.requests[0].url, initial_url) - fixed_url = ('https://foo.s3.eu-central-1.amazonaws.com/bar') + fixed_url = "https://foo.s3.eu-central-1.amazonaws.com/bar" self.assertEqual(http_stubber.requests[-1].url, fixed_url) def test_resign_request_in_us_east_1_fails(self): - region_headers = {'x-amz-bucket-region': 'eu-central-1'} + region_headers = {"x-amz-bucket-region": "eu-central-1"} # Verify that the final 400 response is propagated # back to the user. - client = self.session.create_client('s3', 'us-east-1') + client = self.session.create_client("s3", "us-east-1") with ClientHTTPStubber(client) as http_stubber: http_stubber.add_response(status=400) http_stubber.add_response(status=400, headers=region_headers) http_stubber.add_response(headers=region_headers) # The final request still fails with a 400. http_stubber.add_response(status=400) - with self.assertRaises(ClientError) as e: - client.head_object(Bucket='foo', Key='bar') + with self.assertRaises(ClientError): + client.head_object(Bucket="foo", Key="bar") self.assertEqual(len(http_stubber.requests), 4) def test_no_region_redirect_for_accesspoint(self): self.http_stubber.add_response(**self.redirect_response) - accesspoint_arn = ( - 'arn:aws:s3:us-west-2:123456789012:accesspoint:myendpoint' - ) + accesspoint_arn = "arn:aws:s3:us-west-2:123456789012:accesspoint:myendpoint" with self.http_stubber: try: self.client.list_objects(Bucket=accesspoint_arn) except self.client.exceptions.ClientError as e: - self.assertEqual( - e.response['Error']['Code'], 'PermanentRedirect') + self.assertEqual(e.response["Error"]["Code"], "PermanentRedirect") else: - self.fail('PermanentRedirect error should have been raised') + self.fail("PermanentRedirect error should have been raised") class TestGeneratePresigned(BaseS3OperationTest): @@ -1677,866 +1527,1122 @@ def assert_is_v2_presigned_url(self, url): # Assert that it looks like a v2 presigned url by asserting it does # 
not have a couple of the v4 qs components and assert that it has the # v2 Signature component. - self.assertNotIn('X-Amz-Credential', qs_components) - self.assertNotIn('X-Amz-Algorithm', qs_components) - self.assertIn('Signature', qs_components) + self.assertNotIn("X-Amz-Credential", qs_components) + self.assertNotIn("X-Amz-Algorithm", qs_components) + self.assertIn("Signature", qs_components) def test_generate_unauthed_url(self): config = Config(signature_version=botocore.UNSIGNED) - client = self.session.create_client('s3', self.region, config=config) + client = self.session.create_client("s3", self.region, config=config) url = client.generate_presigned_url( - ClientMethod='get_object', - Params={ - 'Bucket': 'foo', - 'Key': 'bar' - }) - self.assertEqual(url, 'https://foo.s3.amazonaws.com/bar') + ClientMethod="get_object", Params={"Bucket": "foo", "Key": "bar"} + ) + self.assertEqual(url, "https://foo.s3.amazonaws.com/bar") def test_generate_unauthed_post(self): config = Config(signature_version=botocore.UNSIGNED) - client = self.session.create_client('s3', self.region, config=config) - parts = client.generate_presigned_post(Bucket='foo', Key='bar') - expected = { - 'fields': {'key': 'bar'}, - 'url': 'https://foo.s3.amazonaws.com/' - } + client = self.session.create_client("s3", self.region, config=config) + parts = client.generate_presigned_post(Bucket="foo", Key="bar") + expected = {"fields": {"key": "bar"}, "url": "https://foo.s3.amazonaws.com/"} self.assertEqual(parts, expected) def test_default_presign_uses_sigv2(self): - url = self.client.generate_presigned_url(ClientMethod='list_buckets') - self.assertNotIn('Algorithm=AWS4-HMAC-SHA256', url) + url = self.client.generate_presigned_url(ClientMethod="list_buckets") + self.assertNotIn("Algorithm=AWS4-HMAC-SHA256", url) def test_sigv4_presign(self): - config = Config(signature_version='s3v4') - client = self.session.create_client('s3', self.region, config=config) - url = 
client.generate_presigned_url(ClientMethod='list_buckets') - self.assertIn('Algorithm=AWS4-HMAC-SHA256', url) + config = Config(signature_version="s3v4") + client = self.session.create_client("s3", self.region, config=config) + url = client.generate_presigned_url(ClientMethod="list_buckets") + self.assertIn("Algorithm=AWS4-HMAC-SHA256", url) def test_sigv2_presign(self): - config = Config(signature_version='s3') - client = self.session.create_client('s3', self.region, config=config) - url = client.generate_presigned_url(ClientMethod='list_buckets') - self.assertNotIn('Algorithm=AWS4-HMAC-SHA256', url) + config = Config(signature_version="s3") + client = self.session.create_client("s3", self.region, config=config) + url = client.generate_presigned_url(ClientMethod="list_buckets") + self.assertNotIn("Algorithm=AWS4-HMAC-SHA256", url) def test_uses_sigv4_for_unknown_region(self): - client = self.session.create_client('s3', 'us-west-88') - url = client.generate_presigned_url(ClientMethod='list_buckets') - self.assertIn('Algorithm=AWS4-HMAC-SHA256', url) + client = self.session.create_client("s3", "us-west-88") + url = client.generate_presigned_url(ClientMethod="list_buckets") + self.assertIn("Algorithm=AWS4-HMAC-SHA256", url) def test_default_presign_sigv4_in_sigv4_only_region(self): - client = self.session.create_client('s3', 'us-east-2') - url = client.generate_presigned_url(ClientMethod='list_buckets') - self.assertIn('Algorithm=AWS4-HMAC-SHA256', url) + client = self.session.create_client("s3", "us-east-2") + url = client.generate_presigned_url(ClientMethod="list_buckets") + self.assertIn("Algorithm=AWS4-HMAC-SHA256", url) def test_presign_unsigned(self): config = Config(signature_version=botocore.UNSIGNED) - client = self.session.create_client('s3', 'us-east-2', config=config) - url = client.generate_presigned_url(ClientMethod='list_buckets') - self.assertEqual( - 'https://s3.us-east-2.amazonaws.com/', url) + client = self.session.create_client("s3", "us-east-2", 
config=config) + url = client.generate_presigned_url(ClientMethod="list_buckets") + self.assertEqual("https://s3.us-east-2.amazonaws.com/", url) def test_presign_url_with_ssec(self): - config = Config(signature_version='s3') - client = self.session.create_client('s3', 'us-east-1', config=config) + config = Config(signature_version="s3") + client = self.session.create_client("s3", "us-east-1", config=config) url = client.generate_presigned_url( - ClientMethod='get_object', + ClientMethod="get_object", Params={ - 'Bucket': 'mybucket', - 'Key': 'mykey', - 'SSECustomerKey': 'a' * 32, - 'SSECustomerAlgorithm': 'AES256' - } + "Bucket": "mybucket", + "Key": "mykey", + "SSECustomerKey": "a" * 32, + "SSECustomerAlgorithm": "AES256", + }, ) # The md5 of the sse-c key will be injected when parameters are # built so it should show up in the presigned url as well. - self.assertIn( - 'x-amz-server-side-encryption-customer-key-md5=', url - ) + self.assertIn("x-amz-server-side-encryption-customer-key-md5=", url) def test_presign_s3_accelerate(self): - config = Config(signature_version=botocore.UNSIGNED, - s3={'use_accelerate_endpoint': True}) - client = self.session.create_client('s3', 'us-east-1', config=config) + config = Config( + signature_version=botocore.UNSIGNED, s3={"use_accelerate_endpoint": True} + ) + client = self.session.create_client("s3", "us-east-1", config=config) url = client.generate_presigned_url( - ClientMethod='get_object', - Params={'Bucket': 'mybucket', 'Key': 'mykey'} + ClientMethod="get_object", Params={"Bucket": "mybucket", "Key": "mykey"} ) # The url should be the accelerate endpoint - self.assertEqual( - 'https://mybucket.s3-accelerate.amazonaws.com/mykey', url) + self.assertEqual("https://mybucket.s3-accelerate.amazonaws.com/mykey", url) def test_presign_post_s3_accelerate(self): - config = Config(signature_version=botocore.UNSIGNED, - s3={'use_accelerate_endpoint': True}) - client = self.session.create_client('s3', 'us-east-1', config=config) - parts 
= client.generate_presigned_post( - Bucket='mybucket', Key='mykey') + config = Config( + signature_version=botocore.UNSIGNED, s3={"use_accelerate_endpoint": True} + ) + client = self.session.create_client("s3", "us-east-1", config=config) + parts = client.generate_presigned_post(Bucket="mybucket", Key="mykey") # The url should be the accelerate endpoint expected = { - 'fields': {'key': 'mykey'}, - 'url': 'https://mybucket.s3-accelerate.amazonaws.com/' + "fields": {"key": "mykey"}, + "url": "https://mybucket.s3-accelerate.amazonaws.com/", } self.assertEqual(parts, expected) def test_presign_uses_v2_for_aws_global(self): - client = self.session.create_client('s3', 'aws-global') + client = self.session.create_client("s3", "aws-global") url = client.generate_presigned_url( - 'get_object', {'Bucket': 'mybucket', 'Key': 'mykey'}) + "get_object", {"Bucket": "mybucket", "Key": "mykey"} + ) self.assert_is_v2_presigned_url(url) def test_presign_uses_v2_for_default_region_with_us_east_1_regional(self): - config = Config(s3={'us_east_1_regional_endpoint': 'regional'}) - client = self.session.create_client('s3', config=config) + config = Config(s3={"us_east_1_regional_endpoint": "regional"}) + client = self.session.create_client("s3", config=config) url = client.generate_presigned_url( - 'get_object', {'Bucket': 'mybucket', 'Key': 'mykey'}) + "get_object", {"Bucket": "mybucket", "Key": "mykey"} + ) self.assert_is_v2_presigned_url(url) def test_presign_uses_v2_for_aws_global_with_us_east_1_regional(self): - config = Config(s3={'us_east_1_regional_endpoint': 'regional'}) - client = self.session.create_client('s3', 'aws-global', config=config) + config = Config(s3={"us_east_1_regional_endpoint": "regional"}) + client = self.session.create_client("s3", "aws-global", config=config) url = client.generate_presigned_url( - 'get_object', {'Bucket': 'mybucket', 'Key': 'mykey'}) + "get_object", {"Bucket": "mybucket", "Key": "mykey"} + ) self.assert_is_v2_presigned_url(url) def 
test_presign_uses_v2_for_us_east_1(self): - client = self.session.create_client('s3', 'us-east-1') + client = self.session.create_client("s3", "us-east-1") url = client.generate_presigned_url( - 'get_object', {'Bucket': 'mybucket', 'Key': 'mykey'}) + "get_object", {"Bucket": "mybucket", "Key": "mykey"} + ) self.assert_is_v2_presigned_url(url) def test_presign_uses_v2_for_us_east_1_with_us_east_1_regional(self): - config = Config(s3={'us_east_1_regional_endpoint': 'regional'}) - client = self.session.create_client('s3', 'us-east-1', config=config) + config = Config(s3={"us_east_1_regional_endpoint": "regional"}) + client = self.session.create_client("s3", "us-east-1", config=config) url = client.generate_presigned_url( - 'get_object', {'Bucket': 'mybucket', 'Key': 'mykey'}) + "get_object", {"Bucket": "mybucket", "Key": "mykey"} + ) self.assert_is_v2_presigned_url(url) -def _checksum_test_cases(): - yield ('put_bucket_tagging', - {"Bucket": "foo", "Tagging":{"TagSet":[]}}) - yield ('put_bucket_lifecycle', - {"Bucket": "foo", "LifecycleConfiguration":{"Rules":[]}}) - yield ('put_bucket_lifecycle_configuration', - {"Bucket": "foo", "LifecycleConfiguration":{"Rules":[]}}) - yield ('put_bucket_cors', - {"Bucket": "foo", "CORSConfiguration":{"CORSRules": []}}) - yield ('delete_objects', - {"Bucket": "foo", "Delete": {"Objects": [{"Key": "bar"}]}}) - yield ('put_bucket_replication', - {"Bucket": "foo", - "ReplicationConfiguration": {"Role":"", "Rules": []}}) - yield ('put_bucket_acl', - {"Bucket": "foo", "AccessControlPolicy":{}}) - yield ('put_bucket_logging', - {"Bucket": "foo", - "BucketLoggingStatus":{}}) - yield ('put_bucket_notification', - {"Bucket": "foo", "NotificationConfiguration":{}}) - yield ('put_bucket_policy', - {"Bucket": "foo", "Policy": ""}) - yield ('put_bucket_request_payment', - {"Bucket": "foo", "RequestPaymentConfiguration":{"Payer": ""}}) - yield ('put_bucket_versioning', - {"Bucket": "foo", "VersioningConfiguration":{}}) - yield 
('put_bucket_website', - {"Bucket": "foo", - "WebsiteConfiguration":{}}) - yield ('put_object_acl', - {"Bucket": "foo", "Key": "bar", "AccessControlPolicy":{}}) - yield ('put_object_legal_hold', - {"Bucket": "foo", "Key": "bar", "LegalHold":{"Status": "ON"}}) - yield ('put_object_retention', - {"Bucket": "foo", "Key": "bar", - "Retention":{"RetainUntilDate":"2020-11-05"}}) - yield ('put_object_lock_configuration', - {"Bucket": "foo", "ObjectLockConfiguration":{}}) - - -@pytest.mark.parametrize("operation, operation_kwargs", _checksum_test_cases()) +CHECKSUM_TEST_CASES = [ + ("put_bucket_tagging", {"Bucket": "foo", "Tagging": {"TagSet": []}}), + ( + "put_bucket_lifecycle", + {"Bucket": "foo", "LifecycleConfiguration": {"Rules": []}}, + ), + ( + "put_bucket_lifecycle_configuration", + {"Bucket": "foo", "LifecycleConfiguration": {"Rules": []}}, + ), + ("put_bucket_cors", {"Bucket": "foo", "CORSConfiguration": {"CORSRules": []}}), + ("delete_objects", {"Bucket": "foo", "Delete": {"Objects": [{"Key": "bar"}]}}), + ( + "put_bucket_replication", + {"Bucket": "foo", "ReplicationConfiguration": {"Role": "", "Rules": []}}, + ), + ("put_bucket_acl", {"Bucket": "foo", "AccessControlPolicy": {}}), + ("put_bucket_logging", {"Bucket": "foo", "BucketLoggingStatus": {}}), + ("put_bucket_notification", {"Bucket": "foo", "NotificationConfiguration": {}}), + ("put_bucket_policy", {"Bucket": "foo", "Policy": ""}), + ( + "put_bucket_request_payment", + {"Bucket": "foo", "RequestPaymentConfiguration": {"Payer": ""}}, + ), + ("put_bucket_versioning", {"Bucket": "foo", "VersioningConfiguration": {}}), + ("put_bucket_website", {"Bucket": "foo", "WebsiteConfiguration": {}}), + ("put_object_acl", {"Bucket": "foo", "Key": "bar", "AccessControlPolicy": {}}), + ( + "put_object_legal_hold", + {"Bucket": "foo", "Key": "bar", "LegalHold": {"Status": "ON"}}, + ), + ( + "put_object_retention", + {"Bucket": "foo", "Key": "bar", "Retention": {"RetainUntilDate": "2020-11-05"}}, + ), + 
("put_object_lock_configuration", {"Bucket": "foo", "ObjectLockConfiguration": {}}), +] + + +@pytest.mark.parametrize("operation, operation_kwargs", CHECKSUM_TEST_CASES) def test_checksums_included_in_expected_operations(operation, operation_kwargs): """Validate expected calls include Content-MD5 header""" environ = {} - with mock.patch('os.environ', environ): - environ['AWS_ACCESS_KEY_ID'] = 'access_key' - environ['AWS_SECRET_ACCESS_KEY'] = 'secret_key' - environ['AWS_CONFIG_FILE'] = 'no-exist-foo' + with mock.patch("os.environ", environ): + environ["AWS_ACCESS_KEY_ID"] = "access_key" + environ["AWS_SECRET_ACCESS_KEY"] = "secret_key" + environ["AWS_CONFIG_FILE"] = "no-exist-foo" session = create_session() - session.config_filename = 'no-exist-foo' - client = session.create_client('s3') + session.config_filename = "no-exist-foo" + client = session.create_client("s3") with ClientHTTPStubber(client) as stub: stub.add_response() call = getattr(client, operation) call(**operation_kwargs) - assert 'Content-MD5' in stub.requests[-1].headers + assert "Content-MD5" in stub.requests[-1].headers def _s3_addressing_test_cases(): # The default behavior for sigv2. 
DNS compatible buckets - yield dict(region='us-west-2', bucket='bucket', key='key', - signature_version='s3', - expected_url='https://bucket.s3.us-west-2.amazonaws.com/key') - yield dict(region='us-east-1', bucket='bucket', key='key', - signature_version='s3', - expected_url='https://bucket.s3.amazonaws.com/key') - yield dict(region='us-west-1', bucket='bucket', key='key', - signature_version='s3', - expected_url='https://bucket.s3.us-west-1.amazonaws.com/key') - yield dict(region='us-west-1', bucket='bucket', key='key', - signature_version='s3', is_secure=False, - expected_url='http://bucket.s3.us-west-1.amazonaws.com/key') + yield dict( + region="us-west-2", + bucket="bucket", + key="key", + signature_version="s3", + expected_url="https://bucket.s3.us-west-2.amazonaws.com/key", + ) + yield dict( + region="us-east-1", + bucket="bucket", + key="key", + signature_version="s3", + expected_url="https://bucket.s3.amazonaws.com/key", + ) + yield dict( + region="us-west-1", + bucket="bucket", + key="key", + signature_version="s3", + expected_url="https://bucket.s3.us-west-1.amazonaws.com/key", + ) + yield dict( + region="us-west-1", + bucket="bucket", + key="key", + signature_version="s3", + is_secure=False, + expected_url="http://bucket.s3.us-west-1.amazonaws.com/key", + ) # Virtual host addressing is independent of signature version. 
- yield dict(region='us-west-2', bucket='bucket', key='key', - signature_version='s3v4', - expected_url=( - 'https://bucket.s3.us-west-2.amazonaws.com/key')) - yield dict(region='us-east-1', bucket='bucket', key='key', - signature_version='s3v4', - expected_url='https://bucket.s3.amazonaws.com/key') - yield dict(region='us-west-1', bucket='bucket', key='key', - signature_version='s3v4', - expected_url=( - 'https://bucket.s3.us-west-1.amazonaws.com/key')) - yield dict(region='us-west-1', bucket='bucket', key='key', - signature_version='s3v4', is_secure=False, - expected_url=( - 'http://bucket.s3.us-west-1.amazonaws.com/key')) - yield dict( - region='us-west-1', bucket='bucket-with-num-1', key='key', - signature_version='s3v4', is_secure=False, - expected_url='http://bucket-with-num-1.s3.us-west-1.amazonaws.com/key') + yield dict( + region="us-west-2", + bucket="bucket", + key="key", + signature_version="s3v4", + expected_url="https://bucket.s3.us-west-2.amazonaws.com/key", + ) + yield dict( + region="us-east-1", + bucket="bucket", + key="key", + signature_version="s3v4", + expected_url="https://bucket.s3.amazonaws.com/key", + ) + yield dict( + region="us-west-1", + bucket="bucket", + key="key", + signature_version="s3v4", + expected_url="https://bucket.s3.us-west-1.amazonaws.com/key", + ) + yield dict( + region="us-west-1", + bucket="bucket", + key="key", + signature_version="s3v4", + is_secure=False, + expected_url="http://bucket.s3.us-west-1.amazonaws.com/key", + ) + yield dict( + region="us-west-1", + bucket="bucket-with-num-1", + key="key", + signature_version="s3v4", + is_secure=False, + expected_url="http://bucket-with-num-1.s3.us-west-1.amazonaws.com/key", + ) # Regions outside of the 'aws' partition. # These should still default to virtual hosted addressing # unless explicitly configured otherwise. 
- yield dict(region='cn-north-1', bucket='bucket', key='key', - signature_version='s3v4', - expected_url=( - 'https://bucket.s3.cn-north-1.amazonaws.com.cn/key')) + yield dict( + region="cn-north-1", + bucket="bucket", + key="key", + signature_version="s3v4", + expected_url="https://bucket.s3.cn-north-1.amazonaws.com.cn/key", + ) # This isn't actually supported because cn-north-1 is sigv4 only, # but we'll still double check that our internal logic is correct # when building the expected url. - yield dict(region='cn-north-1', bucket='bucket', key='key', - signature_version='s3', - expected_url=( - 'https://bucket.s3.cn-north-1.amazonaws.com.cn/key')) + yield dict( + region="cn-north-1", + bucket="bucket", + key="key", + signature_version="s3", + expected_url="https://bucket.s3.cn-north-1.amazonaws.com.cn/key", + ) # If the request is unsigned, we should have the default # fix_s3_host behavior which is to use virtual hosting where # possible but fall back to path style when needed. - yield dict(region='cn-north-1', bucket='bucket', key='key', - signature_version=UNSIGNED, - expected_url=( - 'https://bucket.s3.cn-north-1.amazonaws.com.cn/key')) - yield dict(region='cn-north-1', bucket='bucket.dot', key='key', - signature_version=UNSIGNED, - expected_url=( - 'https://s3.cn-north-1.amazonaws.com.cn/bucket.dot/key')) + yield dict( + region="cn-north-1", + bucket="bucket", + key="key", + signature_version=UNSIGNED, + expected_url="https://bucket.s3.cn-north-1.amazonaws.com.cn/key", + ) + yield dict( + region="cn-north-1", + bucket="bucket.dot", + key="key", + signature_version=UNSIGNED, + expected_url="https://s3.cn-north-1.amazonaws.com.cn/bucket.dot/key", + ) # And of course you can explicitly specify which style to use. 
- virtual_hosting = {'addressing_style': 'virtual'} - yield dict(region='cn-north-1', bucket='bucket', key='key', - signature_version=UNSIGNED, - s3_config=virtual_hosting, - expected_url=( - 'https://bucket.s3.cn-north-1.amazonaws.com.cn/key')) - path_style = {'addressing_style': 'path'} - yield dict(region='cn-north-1', bucket='bucket', key='key', - signature_version=UNSIGNED, - s3_config=path_style, - expected_url=( - 'https://s3.cn-north-1.amazonaws.com.cn/bucket/key')) + virtual_hosting = {"addressing_style": "virtual"} + yield dict( + region="cn-north-1", + bucket="bucket", + key="key", + signature_version=UNSIGNED, + s3_config=virtual_hosting, + expected_url="https://bucket.s3.cn-north-1.amazonaws.com.cn/key", + ) + + path_style = {"addressing_style": "path"} + yield dict( + region="cn-north-1", + bucket="bucket", + key="key", + signature_version=UNSIGNED, + s3_config=path_style, + expected_url="https://s3.cn-north-1.amazonaws.com.cn/bucket/key", + ) # If you don't have a DNS compatible bucket, we use path style. 
yield dict( - region='us-west-2', bucket='bucket.dot', key='key', - expected_url='https://s3.us-west-2.amazonaws.com/bucket.dot/key') + region="us-west-2", + bucket="bucket.dot", + key="key", + expected_url="https://s3.us-west-2.amazonaws.com/bucket.dot/key", + ) yield dict( - region='us-east-1', bucket='bucket.dot', key='key', - expected_url='https://s3.amazonaws.com/bucket.dot/key') + region="us-east-1", + bucket="bucket.dot", + key="key", + expected_url="https://s3.amazonaws.com/bucket.dot/key", + ) yield dict( - region='us-east-1', bucket='BucketName', key='key', - expected_url='https://s3.amazonaws.com/BucketName/key') + region="us-east-1", + bucket="BucketName", + key="key", + expected_url="https://s3.amazonaws.com/BucketName/key", + ) yield dict( - region='us-west-1', bucket='bucket_name', key='key', - expected_url='https://s3.us-west-1.amazonaws.com/bucket_name/key') + region="us-west-1", + bucket="bucket_name", + key="key", + expected_url="https://s3.us-west-1.amazonaws.com/bucket_name/key", + ) yield dict( - region='us-west-1', bucket='-bucket-name', key='key', - expected_url='https://s3.us-west-1.amazonaws.com/-bucket-name/key') + region="us-west-1", + bucket="-bucket-name", + key="key", + expected_url="https://s3.us-west-1.amazonaws.com/-bucket-name/key", + ) yield dict( - region='us-west-1', bucket='bucket-name-', key='key', - expected_url='https://s3.us-west-1.amazonaws.com/bucket-name-/key') + region="us-west-1", + bucket="bucket-name-", + key="key", + expected_url="https://s3.us-west-1.amazonaws.com/bucket-name-/key", + ) yield dict( - region='us-west-1', bucket='aa', key='key', - expected_url='https://s3.us-west-1.amazonaws.com/aa/key') + region="us-west-1", + bucket="aa", + key="key", + expected_url="https://s3.us-west-1.amazonaws.com/aa/key", + ) yield dict( - region='us-west-1', bucket='a'*64, key='key', - expected_url=('https://s3.us-west-1.amazonaws.com/%s/key' % ('a' * 64)) + region="us-west-1", + bucket="a" * 64, + key="key", + 
expected_url=("https://s3.us-west-1.amazonaws.com/%s/key" % ("a" * 64)), ) # Custom endpoint url should always be used. yield dict( - customer_provided_endpoint='https://my-custom-s3/', - bucket='foo', key='bar', - expected_url='https://my-custom-s3/foo/bar') + customer_provided_endpoint="https://my-custom-s3/", + bucket="foo", + key="bar", + expected_url="https://my-custom-s3/foo/bar", + ) yield dict( - customer_provided_endpoint='https://my-custom-s3/', - bucket='bucket.dots', key='bar', - expected_url='https://my-custom-s3/bucket.dots/bar') + customer_provided_endpoint="https://my-custom-s3/", + bucket="bucket.dots", + key="bar", + expected_url="https://my-custom-s3/bucket.dots/bar", + ) # Doesn't matter what region you specify, a custom endpoint url always # wins. yield dict( - customer_provided_endpoint='https://my-custom-s3/', - region='us-west-2', bucket='foo', key='bar', - expected_url='https://my-custom-s3/foo/bar') + customer_provided_endpoint="https://my-custom-s3/", + region="us-west-2", + bucket="foo", + key="bar", + expected_url="https://my-custom-s3/foo/bar", + ) # Explicitly configuring "virtual" addressing_style. 
- virtual_hosting = {'addressing_style': 'virtual'} + virtual_hosting = {"addressing_style": "virtual"} yield dict( - region='us-east-1', bucket='bucket', key='key', + region="us-east-1", + bucket="bucket", + key="key", s3_config=virtual_hosting, - expected_url='https://bucket.s3.amazonaws.com/key') + expected_url="https://bucket.s3.amazonaws.com/key", + ) yield dict( - region='us-west-2', bucket='bucket', key='key', + region="us-west-2", + bucket="bucket", + key="key", s3_config=virtual_hosting, - expected_url='https://bucket.s3.us-west-2.amazonaws.com/key') + expected_url="https://bucket.s3.us-west-2.amazonaws.com/key", + ) yield dict( - region='eu-central-1', bucket='bucket', key='key', + region="eu-central-1", + bucket="bucket", + key="key", s3_config=virtual_hosting, - expected_url='https://bucket.s3.eu-central-1.amazonaws.com/key') + expected_url="https://bucket.s3.eu-central-1.amazonaws.com/key", + ) yield dict( - region='us-east-1', bucket='bucket', key='key', + region="us-east-1", + bucket="bucket", + key="key", s3_config=virtual_hosting, - customer_provided_endpoint='https://foo.amazonaws.com', - expected_url='https://bucket.foo.amazonaws.com/key') + customer_provided_endpoint="https://foo.amazonaws.com", + expected_url="https://bucket.foo.amazonaws.com/key", + ) yield dict( - region='unknown', bucket='bucket', key='key', + region="unknown", + bucket="bucket", + key="key", s3_config=virtual_hosting, - expected_url='https://bucket.s3.unknown.amazonaws.com/key') + expected_url="https://bucket.s3.unknown.amazonaws.com/key", + ) # Test us-gov with virtual addressing. 
yield dict( - region='us-gov-west-1', bucket='bucket', key='key', + region="us-gov-west-1", + bucket="bucket", + key="key", s3_config=virtual_hosting, - expected_url='https://bucket.s3.us-gov-west-1.amazonaws.com/key') + expected_url="https://bucket.s3.us-gov-west-1.amazonaws.com/key", + ) yield dict( - region='us-gov-west-1', bucket='bucket', key='key', - signature_version='s3', - expected_url='https://bucket.s3.us-gov-west-1.amazonaws.com/key') + region="us-gov-west-1", + bucket="bucket", + key="key", + signature_version="s3", + expected_url="https://bucket.s3.us-gov-west-1.amazonaws.com/key", + ) yield dict( - region='fips-us-gov-west-1', bucket='bucket', key='key', - signature_version='s3', - expected_url='https://bucket.s3-fips.us-gov-west-1.amazonaws.com/key') - + region="fips-us-gov-west-1", + bucket="bucket", + key="key", + signature_version="s3", + expected_url="https://bucket.s3-fips.us-gov-west-1.amazonaws.com/key", + ) # Test path style addressing. - path_style = {'addressing_style': 'path'} + path_style = {"addressing_style": "path"} yield dict( - region='us-east-1', bucket='bucket', key='key', + region="us-east-1", + bucket="bucket", + key="key", s3_config=path_style, - expected_url='https://s3.amazonaws.com/bucket/key') + expected_url="https://s3.amazonaws.com/bucket/key", + ) yield dict( - region='us-east-1', bucket='bucket', key='key', + region="us-east-1", + bucket="bucket", + key="key", s3_config=path_style, - customer_provided_endpoint='https://foo.amazonaws.com/', - expected_url='https://foo.amazonaws.com/bucket/key') + customer_provided_endpoint="https://foo.amazonaws.com/", + expected_url="https://foo.amazonaws.com/bucket/key", + ) yield dict( - region='unknown', bucket='bucket', key='key', + region="unknown", + bucket="bucket", + key="key", s3_config=path_style, - expected_url='https://s3.unknown.amazonaws.com/bucket/key') + expected_url="https://s3.unknown.amazonaws.com/bucket/key", + ) # S3 accelerate - use_accelerate = 
{'use_accelerate_endpoint': True} + use_accelerate = {"use_accelerate_endpoint": True} yield dict( - region='us-east-1', bucket='bucket', key='key', + region="us-east-1", + bucket="bucket", + key="key", s3_config=use_accelerate, - expected_url='https://bucket.s3-accelerate.amazonaws.com/key') + expected_url="https://bucket.s3-accelerate.amazonaws.com/key", + ) yield dict( # region is ignored with S3 accelerate. - region='us-west-2', bucket='bucket', key='key', + region="us-west-2", + bucket="bucket", + key="key", s3_config=use_accelerate, - expected_url='https://bucket.s3-accelerate.amazonaws.com/key') + expected_url="https://bucket.s3-accelerate.amazonaws.com/key", + ) # Provided endpoints still get recognized as accelerate endpoints. yield dict( - region='us-east-1', bucket='bucket', key='key', - customer_provided_endpoint='https://s3-accelerate.amazonaws.com', - expected_url='https://bucket.s3-accelerate.amazonaws.com/key') + region="us-east-1", + bucket="bucket", + key="key", + customer_provided_endpoint="https://s3-accelerate.amazonaws.com", + expected_url="https://bucket.s3-accelerate.amazonaws.com/key", + ) yield dict( - region='us-east-1', bucket='bucket', key='key', - customer_provided_endpoint='http://s3-accelerate.amazonaws.com', - expected_url='http://bucket.s3-accelerate.amazonaws.com/key') + region="us-east-1", + bucket="bucket", + key="key", + customer_provided_endpoint="http://s3-accelerate.amazonaws.com", + expected_url="http://bucket.s3-accelerate.amazonaws.com/key", + ) yield dict( - region='us-east-1', bucket='bucket', key='key', - s3_config=use_accelerate, is_secure=False, + region="us-east-1", + bucket="bucket", + key="key", + s3_config=use_accelerate, + is_secure=False, # Note we're using http:// because is_secure=False. 
- expected_url='http://bucket.s3-accelerate.amazonaws.com/key') + expected_url="http://bucket.s3-accelerate.amazonaws.com/key", + ) yield dict( - region='us-east-1', bucket='bucket', key='key', + region="us-east-1", + bucket="bucket", + key="key", # s3-accelerate must be the first part of the url. - customer_provided_endpoint='https://foo.s3-accelerate.amazonaws.com', - expected_url='https://foo.s3-accelerate.amazonaws.com/bucket/key') + customer_provided_endpoint="https://foo.s3-accelerate.amazonaws.com", + expected_url="https://foo.s3-accelerate.amazonaws.com/bucket/key", + ) yield dict( - region='us-east-1', bucket='bucket', key='key', + region="us-east-1", + bucket="bucket", + key="key", # The endpoint must be an Amazon endpoint. - customer_provided_endpoint='https://s3-accelerate.notamazon.com', - expected_url='https://s3-accelerate.notamazon.com/bucket/key') + customer_provided_endpoint="https://s3-accelerate.notamazon.com", + expected_url="https://s3-accelerate.notamazon.com/bucket/key", + ) yield dict( - region='us-east-1', bucket='bucket', key='key', + region="us-east-1", + bucket="bucket", + key="key", # Extra components must be whitelisted. - customer_provided_endpoint='https://s3-accelerate.foo.amazonaws.com', - expected_url='https://s3-accelerate.foo.amazonaws.com/bucket/key') + customer_provided_endpoint="https://s3-accelerate.foo.amazonaws.com", + expected_url="https://s3-accelerate.foo.amazonaws.com/bucket/key", + ) yield dict( - region='unknown', bucket='bucket', key='key', + region="unknown", + bucket="bucket", + key="key", s3_config=use_accelerate, - expected_url='https://bucket.s3-accelerate.amazonaws.com/key') + expected_url="https://bucket.s3-accelerate.amazonaws.com/key", + ) # Use virtual even if path is specified for s3 accelerate because # path style will not work with S3 accelerate. 
yield dict( - region='us-east-1', bucket='bucket', key='key', - s3_config={'use_accelerate_endpoint': True, - 'addressing_style': 'path'}, - expected_url='https://bucket.s3-accelerate.amazonaws.com/key') + region="us-east-1", + bucket="bucket", + key="key", + s3_config={"use_accelerate_endpoint": True, "addressing_style": "path"}, + expected_url="https://bucket.s3-accelerate.amazonaws.com/key", + ) # S3 dual stack endpoints. - use_dualstack = {'use_dualstack_endpoint': True} + use_dualstack = {"use_dualstack_endpoint": True} yield dict( - region='us-east-1', bucket='bucket', key='key', - s3_config=use_dualstack, signature_version='s3', + region="us-east-1", + bucket="bucket", + key="key", + s3_config=use_dualstack, + signature_version="s3", # Still default to virtual hosted when possible on sigv2. - expected_url='https://bucket.s3.dualstack.us-east-1.amazonaws.com/key') + expected_url="https://bucket.s3.dualstack.us-east-1.amazonaws.com/key", + ) yield dict( - region=None, bucket='bucket', key='key', + region=None, + bucket="bucket", + key="key", s3_config=use_dualstack, # Uses us-east-1 for no region set. - expected_url='https://bucket.s3.dualstack.us-east-1.amazonaws.com/key') + expected_url="https://bucket.s3.dualstack.us-east-1.amazonaws.com/key", + ) yield dict( - region='aws-global', bucket='bucket', key='key', + region="aws-global", + bucket="bucket", + key="key", s3_config=use_dualstack, # Pseudo-regions should not have any special resolving logic even when # the endpoint won't work as we do not have the metadata to know that # a region does not support dualstack. So just format it based on the # region name. 
- expected_url=( - 'https://bucket.s3.dualstack.aws-global.amazonaws.com/key')) + expected_url=("https://bucket.s3.dualstack.aws-global.amazonaws.com/key"), + ) yield dict( - region='us-west-2', bucket='bucket', key='key', - s3_config=use_dualstack, signature_version='s3', + region="us-west-2", + bucket="bucket", + key="key", + s3_config=use_dualstack, + signature_version="s3", # Still default to virtual hosted when possible on sigv2. - expected_url='https://bucket.s3.dualstack.us-west-2.amazonaws.com/key') + expected_url="https://bucket.s3.dualstack.us-west-2.amazonaws.com/key", + ) yield dict( - region='us-east-1', bucket='bucket', key='key', - s3_config=use_dualstack, signature_version='s3v4', - expected_url='https://bucket.s3.dualstack.us-east-1.amazonaws.com/key') + region="us-east-1", + bucket="bucket", + key="key", + s3_config=use_dualstack, + signature_version="s3v4", + expected_url="https://bucket.s3.dualstack.us-east-1.amazonaws.com/key", + ) yield dict( - region='us-west-2', bucket='bucket', key='key', - s3_config=use_dualstack, signature_version='s3v4', - expected_url='https://bucket.s3.dualstack.us-west-2.amazonaws.com/key') + region="us-west-2", + bucket="bucket", + key="key", + s3_config=use_dualstack, + signature_version="s3v4", + expected_url="https://bucket.s3.dualstack.us-west-2.amazonaws.com/key", + ) yield dict( - region='unknown', bucket='bucket', key='key', - s3_config=use_dualstack, signature_version='s3v4', - expected_url='https://bucket.s3.dualstack.unknown.amazonaws.com/key') + region="unknown", + bucket="bucket", + key="key", + s3_config=use_dualstack, + signature_version="s3v4", + expected_url="https://bucket.s3.dualstack.unknown.amazonaws.com/key", + ) # Non DNS compatible buckets use path style for dual stack. yield dict( - region='us-west-2', bucket='bucket.dot', key='key', + region="us-west-2", + bucket="bucket.dot", + key="key", s3_config=use_dualstack, # Still default to virtual hosted when possible. 
- expected_url=( - 'https://s3.dualstack.us-west-2.amazonaws.com/bucket.dot/key')) + expected_url=("https://s3.dualstack.us-west-2.amazonaws.com/bucket.dot/key"), + ) # Supports is_secure (use_ssl=False in create_client()). yield dict( - region='us-west-2', bucket='bucket.dot', key='key', is_secure=False, + region="us-west-2", + bucket="bucket.dot", + key="key", + is_secure=False, s3_config=use_dualstack, # Still default to virtual hosted when possible. - expected_url=( - 'http://s3.dualstack.us-west-2.amazonaws.com/bucket.dot/key')) + expected_url=("http://s3.dualstack.us-west-2.amazonaws.com/bucket.dot/key"), + ) # Is path style is requested, we should use it, even if the bucket is # DNS compatible. force_path_style = { - 'use_dualstack_endpoint': True, - 'addressing_style': 'path', + "use_dualstack_endpoint": True, + "addressing_style": "path", } yield dict( - region='us-west-2', bucket='bucket', key='key', + region="us-west-2", + bucket="bucket", + key="key", s3_config=force_path_style, # Still default to virtual hosted when possible. - expected_url='https://s3.dualstack.us-west-2.amazonaws.com/bucket/key') + expected_url="https://s3.dualstack.us-west-2.amazonaws.com/bucket/key", + ) # Accelerate + dual stack use_accelerate_dualstack = { - 'use_accelerate_endpoint': True, - 'use_dualstack_endpoint': True, + "use_accelerate_endpoint": True, + "use_dualstack_endpoint": True, } yield dict( - region='us-east-1', bucket='bucket', key='key', + region="us-east-1", + bucket="bucket", + key="key", s3_config=use_accelerate_dualstack, - expected_url=( - 'https://bucket.s3-accelerate.dualstack.amazonaws.com/key')) + expected_url=("https://bucket.s3-accelerate.dualstack.amazonaws.com/key"), + ) yield dict( # Region is ignored with S3 accelerate. 
- region='us-west-2', bucket='bucket', key='key', + region="us-west-2", + bucket="bucket", + key="key", s3_config=use_accelerate_dualstack, - expected_url=( - 'https://bucket.s3-accelerate.dualstack.amazonaws.com/key')) + expected_url=("https://bucket.s3-accelerate.dualstack.amazonaws.com/key"), + ) # Only s3-accelerate overrides a customer endpoint. yield dict( - region='us-east-1', bucket='bucket', key='key', + region="us-east-1", + bucket="bucket", + key="key", s3_config=use_dualstack, - customer_provided_endpoint='https://s3-accelerate.amazonaws.com', - expected_url=( - 'https://bucket.s3-accelerate.amazonaws.com/key')) + customer_provided_endpoint="https://s3-accelerate.amazonaws.com", + expected_url=("https://bucket.s3-accelerate.amazonaws.com/key"), + ) yield dict( - region='us-east-1', bucket='bucket', key='key', + region="us-east-1", + bucket="bucket", + key="key", # Dualstack is whitelisted. - customer_provided_endpoint=( - 'https://s3-accelerate.dualstack.amazonaws.com'), - expected_url=( - 'https://bucket.s3-accelerate.dualstack.amazonaws.com/key')) + customer_provided_endpoint=("https://s3-accelerate.dualstack.amazonaws.com"), + expected_url=("https://bucket.s3-accelerate.dualstack.amazonaws.com/key"), + ) yield dict( - region='us-east-1', bucket='bucket', key='key', + region="us-east-1", + bucket="bucket", + key="key", # Even whitelisted parts cannot be duplicated. customer_provided_endpoint=( - 'https://s3-accelerate.dualstack.dualstack.amazonaws.com'), + "https://s3-accelerate.dualstack.dualstack.amazonaws.com" + ), expected_url=( - 'https://s3-accelerate.dualstack.dualstack' - '.amazonaws.com/bucket/key')) + "https://s3-accelerate.dualstack.dualstack" ".amazonaws.com/bucket/key" + ), + ) yield dict( - region='us-east-1', bucket='bucket', key='key', + region="us-east-1", + bucket="bucket", + key="key", # More than two extra parts is not allowed. 
customer_provided_endpoint=( - 'https://s3-accelerate.dualstack.dualstack.dualstack' - '.amazonaws.com'), + "https://s3-accelerate.dualstack.dualstack.dualstack" ".amazonaws.com" + ), expected_url=( - 'https://s3-accelerate.dualstack.dualstack.dualstack.amazonaws.com' - '/bucket/key')) + "https://s3-accelerate.dualstack.dualstack.dualstack.amazonaws.com" + "/bucket/key" + ), + ) yield dict( - region='us-east-1', bucket='bucket', key='key', + region="us-east-1", + bucket="bucket", + key="key", # Extra components must be whitelisted. - customer_provided_endpoint='https://s3-accelerate.foo.amazonaws.com', - expected_url='https://s3-accelerate.foo.amazonaws.com/bucket/key') + customer_provided_endpoint="https://s3-accelerate.foo.amazonaws.com", + expected_url="https://s3-accelerate.foo.amazonaws.com/bucket/key", + ) yield dict( - region='us-east-1', bucket='bucket', key='key', - s3_config=use_accelerate_dualstack, is_secure=False, + region="us-east-1", + bucket="bucket", + key="key", + s3_config=use_accelerate_dualstack, + is_secure=False, # Note we're using http:// because is_secure=False. - expected_url=( - 'http://bucket.s3-accelerate.dualstack.amazonaws.com/key')) + expected_url=("http://bucket.s3-accelerate.dualstack.amazonaws.com/key"), + ) # Use virtual even if path is specified for s3 accelerate because # path style will not work with S3 accelerate. 
- use_accelerate_dualstack['addressing_style'] = 'path' + use_accelerate_dualstack["addressing_style"] = "path" yield dict( - region='us-east-1', bucket='bucket', key='key', + region="us-east-1", + bucket="bucket", + key="key", s3_config=use_accelerate_dualstack, - expected_url=( - 'https://bucket.s3-accelerate.dualstack.amazonaws.com/key')) + expected_url=("https://bucket.s3-accelerate.dualstack.amazonaws.com/key"), + ) # Access-point arn cases - accesspoint_arn = ( - 'arn:aws:s3:us-west-2:123456789012:accesspoint:myendpoint' - ) + accesspoint_arn = "arn:aws:s3:us-west-2:123456789012:accesspoint:myendpoint" yield dict( - region='us-west-2', bucket=accesspoint_arn, key='key', + region="us-west-2", + bucket=accesspoint_arn, + key="key", expected_url=( - 'https://myendpoint-123456789012.s3-accesspoint.' - 'us-west-2.amazonaws.com/key' - ) + "https://myendpoint-123456789012.s3-accesspoint." + "us-west-2.amazonaws.com/key" + ), ) yield dict( - region='us-west-2', bucket=accesspoint_arn, key='key', - s3_config={'use_arn_region': True}, + region="us-west-2", + bucket=accesspoint_arn, + key="key", + s3_config={"use_arn_region": True}, expected_url=( - 'https://myendpoint-123456789012.s3-accesspoint.' - 'us-west-2.amazonaws.com/key' - ) + "https://myendpoint-123456789012.s3-accesspoint." + "us-west-2.amazonaws.com/key" + ), ) yield dict( - region='us-west-2', bucket=accesspoint_arn, key='myendpoint/key', + region="us-west-2", + bucket=accesspoint_arn, + key="myendpoint/key", expected_url=( - 'https://myendpoint-123456789012.s3-accesspoint.' - 'us-west-2.amazonaws.com/myendpoint/key' - ) + "https://myendpoint-123456789012.s3-accesspoint." + "us-west-2.amazonaws.com/myendpoint/key" + ), ) yield dict( - region='us-west-2', bucket=accesspoint_arn, key='foo/myendpoint/key', + region="us-west-2", + bucket=accesspoint_arn, + key="foo/myendpoint/key", expected_url=( - 'https://myendpoint-123456789012.s3-accesspoint.' 
- 'us-west-2.amazonaws.com/foo/myendpoint/key' - ) + "https://myendpoint-123456789012.s3-accesspoint." + "us-west-2.amazonaws.com/foo/myendpoint/key" + ), ) yield dict( # Note: The access-point arn has us-west-2 and the client's region is # us-east-1, for the defauldict the access-point arn region is used. - region='us-east-1', bucket=accesspoint_arn, key='key', + region="us-east-1", + bucket=accesspoint_arn, + key="key", expected_url=( - 'https://myendpoint-123456789012.s3-accesspoint.' - 'us-west-2.amazonaws.com/key' - ) + "https://myendpoint-123456789012.s3-accesspoint." + "us-west-2.amazonaws.com/key" + ), ) yield dict( - region='us-east-1', bucket=accesspoint_arn, key='key', - s3_config={'use_arn_region': False}, + region="us-east-1", + bucket=accesspoint_arn, + key="key", + s3_config={"use_arn_region": False}, expected_url=( - 'https://myendpoint-123456789012.s3-accesspoint.' - 'us-east-1.amazonaws.com/key' - ) + "https://myendpoint-123456789012.s3-accesspoint." + "us-east-1.amazonaws.com/key" + ), ) yield dict( - region='s3-external-1', bucket=accesspoint_arn, key='key', - s3_config={'use_arn_region': True}, + region="s3-external-1", + bucket=accesspoint_arn, + key="key", + s3_config={"use_arn_region": True}, expected_url=( - 'https://myendpoint-123456789012.s3-accesspoint.' - 'us-west-2.amazonaws.com/key' - ) + "https://myendpoint-123456789012.s3-accesspoint." + "us-west-2.amazonaws.com/key" + ), ) yield dict( - region='aws-global', bucket=accesspoint_arn, key='key', - s3_config={'use_arn_region': True}, + region="aws-global", + bucket=accesspoint_arn, + key="key", + s3_config={"use_arn_region": True}, expected_url=( - 'https://myendpoint-123456789012.s3-accesspoint.' - 'us-west-2.amazonaws.com/key' - ) + "https://myendpoint-123456789012.s3-accesspoint." 
+ "us-west-2.amazonaws.com/key" + ), ) yield dict( - region='unknown', bucket=accesspoint_arn, key='key', - s3_config={'use_arn_region': False}, + region="unknown", + bucket=accesspoint_arn, + key="key", + s3_config={"use_arn_region": False}, expected_url=( - 'https://myendpoint-123456789012.s3-accesspoint.' - 'unknown.amazonaws.com/key' - ) + "https://myendpoint-123456789012.s3-accesspoint." + "unknown.amazonaws.com/key" + ), ) yield dict( - region='unknown', bucket=accesspoint_arn, key='key', - s3_config={'use_arn_region': True}, + region="unknown", + bucket=accesspoint_arn, + key="key", + s3_config={"use_arn_region": True}, expected_url=( - 'https://myendpoint-123456789012.s3-accesspoint.' - 'us-west-2.amazonaws.com/key' - ) - ) - accesspoint_arn_cn = ( - 'arn:aws-cn:s3:cn-north-1:123456789012:accesspoint:myendpoint' + "https://myendpoint-123456789012.s3-accesspoint." + "us-west-2.amazonaws.com/key" + ), ) + accesspoint_arn_cn = "arn:aws-cn:s3:cn-north-1:123456789012:accesspoint:myendpoint" yield dict( - region='cn-north-1', bucket=accesspoint_arn_cn, key='key', + region="cn-north-1", + bucket=accesspoint_arn_cn, + key="key", expected_url=( - 'https://myendpoint-123456789012.s3-accesspoint.' - 'cn-north-1.amazonaws.com.cn/key' - ) + "https://myendpoint-123456789012.s3-accesspoint." + "cn-north-1.amazonaws.com.cn/key" + ), ) yield dict( - region='cn-northwest-1', bucket=accesspoint_arn_cn, key='key', + region="cn-northwest-1", + bucket=accesspoint_arn_cn, + key="key", expected_url=( - 'https://myendpoint-123456789012.s3-accesspoint.' - 'cn-north-1.amazonaws.com.cn/key' - ) + "https://myendpoint-123456789012.s3-accesspoint." + "cn-north-1.amazonaws.com.cn/key" + ), ) yield dict( - region='cn-northwest-1', bucket=accesspoint_arn_cn, key='key', - s3_config={'use_arn_region': False}, + region="cn-northwest-1", + bucket=accesspoint_arn_cn, + key="key", + s3_config={"use_arn_region": False}, expected_url=( - 'https://myendpoint-123456789012.s3-accesspoint.' 
- 'cn-northwest-1.amazonaws.com.cn/key' - ) + "https://myendpoint-123456789012.s3-accesspoint." + "cn-northwest-1.amazonaws.com.cn/key" + ), ) accesspoint_arn_gov = ( - 'arn:aws-us-gov:s3:us-gov-west-1:123456789012:accesspoint:myendpoint' + "arn:aws-us-gov:s3:us-gov-west-1:123456789012:accesspoint:myendpoint" ) yield dict( - region='us-gov-west-1', bucket=accesspoint_arn_gov, key='key', + region="us-gov-west-1", + bucket=accesspoint_arn_gov, + key="key", expected_url=( - 'https://myendpoint-123456789012.s3-accesspoint.' - 'us-gov-west-1.amazonaws.com/key' - ) + "https://myendpoint-123456789012.s3-accesspoint." + "us-gov-west-1.amazonaws.com/key" + ), ) yield dict( - region='fips-us-gov-west-1', bucket=accesspoint_arn_gov, key='key', + region="fips-us-gov-west-1", + bucket=accesspoint_arn_gov, + key="key", expected_url=( - 'https://myendpoint-123456789012.s3-accesspoint-fips.' - 'us-gov-west-1.amazonaws.com/key' - ) + "https://myendpoint-123456789012.s3-accesspoint-fips." + "us-gov-west-1.amazonaws.com/key" + ), ) yield dict( - region='fips-us-gov-west-1', bucket=accesspoint_arn_gov, key='key', - s3_config={'use_arn_region': False}, + region="fips-us-gov-west-1", + bucket=accesspoint_arn_gov, + key="key", + s3_config={"use_arn_region": False}, expected_url=( - 'https://myendpoint-123456789012.s3-accesspoint-fips.' - 'fips-us-gov-west-1.amazonaws.com/key' - ) + "https://myendpoint-123456789012.s3-accesspoint-fips." + "fips-us-gov-west-1.amazonaws.com/key" + ), ) yield dict( - region='us-west-2', bucket=accesspoint_arn, key='key', is_secure=False, + region="us-west-2", + bucket=accesspoint_arn, + key="key", + is_secure=False, expected_url=( - 'http://myendpoint-123456789012.s3-accesspoint.' - 'us-west-2.amazonaws.com/key' - ) + "http://myendpoint-123456789012.s3-accesspoint." 
+ "us-west-2.amazonaws.com/key" + ), ) # Dual-stack with access-point arn yield dict( # Note: The access-point arn has us-west-2 and the client's region is # us-east-1, for the defauldict the access-point arn region is used. - region='us-east-1', bucket=accesspoint_arn, key='key', + region="us-east-1", + bucket=accesspoint_arn, + key="key", s3_config={ - 'use_dualstack_endpoint': True, + "use_dualstack_endpoint": True, }, expected_url=( - 'https://myendpoint-123456789012.s3-accesspoint.dualstack.' - 'us-west-2.amazonaws.com/key' - ) + "https://myendpoint-123456789012.s3-accesspoint.dualstack." + "us-west-2.amazonaws.com/key" + ), ) yield dict( - region='us-east-1', bucket=accesspoint_arn, key='key', - s3_config={ - 'use_dualstack_endpoint': True, - 'use_arn_region': False - }, + region="us-east-1", + bucket=accesspoint_arn, + key="key", + s3_config={"use_dualstack_endpoint": True, "use_arn_region": False}, expected_url=( - 'https://myendpoint-123456789012.s3-accesspoint.dualstack.' - 'us-east-1.amazonaws.com/key' - ) + "https://myendpoint-123456789012.s3-accesspoint.dualstack." + "us-east-1.amazonaws.com/key" + ), ) yield dict( - region='us-gov-west-1', bucket=accesspoint_arn_gov, key='key', + region="us-gov-west-1", + bucket=accesspoint_arn_gov, + key="key", s3_config={ - 'use_dualstack_endpoint': True, + "use_dualstack_endpoint": True, }, expected_url=( - 'https://myendpoint-123456789012.s3-accesspoint.dualstack.' - 'us-gov-west-1.amazonaws.com/key' - ) + "https://myendpoint-123456789012.s3-accesspoint.dualstack." + "us-gov-west-1.amazonaws.com/key" + ), ) yield dict( - region='fips-us-gov-west-1', bucket=accesspoint_arn_gov, key='key', + region="fips-us-gov-west-1", + bucket=accesspoint_arn_gov, + key="key", s3_config={ - 'use_arn_region': True, - 'use_dualstack_endpoint': True, + "use_arn_region": True, + "use_dualstack_endpoint": True, }, expected_url=( - 'https://myendpoint-123456789012.s3-accesspoint-fips.dualstack.' 
- 'us-gov-west-1.amazonaws.com/key' - ) + "https://myendpoint-123456789012.s3-accesspoint-fips.dualstack." + "us-gov-west-1.amazonaws.com/key" + ), ) # None of the various s3 settings related to paths should affect what # endpoint to use when an access-point is provided. yield dict( - region='us-west-2', bucket=accesspoint_arn, key='key', - s3_config={'adressing_style': 'auto'}, + region="us-west-2", + bucket=accesspoint_arn, + key="key", + s3_config={"adressing_style": "auto"}, expected_url=( - 'https://myendpoint-123456789012.s3-accesspoint.' - 'us-west-2.amazonaws.com/key' - ) + "https://myendpoint-123456789012.s3-accesspoint." + "us-west-2.amazonaws.com/key" + ), ) yield dict( - region='us-west-2', bucket=accesspoint_arn, key='key', - s3_config={'adressing_style': 'virtual'}, + region="us-west-2", + bucket=accesspoint_arn, + key="key", + s3_config={"adressing_style": "virtual"}, expected_url=( - 'https://myendpoint-123456789012.s3-accesspoint.' - 'us-west-2.amazonaws.com/key' - ) + "https://myendpoint-123456789012.s3-accesspoint." + "us-west-2.amazonaws.com/key" + ), ) yield dict( - region='us-west-2', bucket=accesspoint_arn, key='key', - s3_config={'adressing_style': 'path'}, + region="us-west-2", + bucket=accesspoint_arn, + key="key", + s3_config={"adressing_style": "path"}, expected_url=( - 'https://myendpoint-123456789012.s3-accesspoint.' - 'us-west-2.amazonaws.com/key' - ) + "https://myendpoint-123456789012.s3-accesspoint." 
+ "us-west-2.amazonaws.com/key" + ), ) # Use us-east-1 regional endpoindicts: regional - us_east_1_regional_endpoint = { - 'us_east_1_regional_endpoint': 'regional' - } + us_east_1_regional_endpoint = {"us_east_1_regional_endpoint": "regional"} yield dict( - region='us-east-1', bucket='bucket', key='key', + region="us-east-1", + bucket="bucket", + key="key", s3_config=us_east_1_regional_endpoint, - expected_url=( - 'https://bucket.s3.us-east-1.amazonaws.com/key')) + expected_url=("https://bucket.s3.us-east-1.amazonaws.com/key"), + ) yield dict( - region='us-west-2', bucket='bucket', key='key', + region="us-west-2", + bucket="bucket", + key="key", s3_config=us_east_1_regional_endpoint, - expected_url=( - 'https://bucket.s3.us-west-2.amazonaws.com/key')) + expected_url=("https://bucket.s3.us-west-2.amazonaws.com/key"), + ) yield dict( - region=None, bucket='bucket', key='key', + region=None, + bucket="bucket", + key="key", s3_config=us_east_1_regional_endpoint, - expected_url=( - 'https://bucket.s3.amazonaws.com/key')) + expected_url=("https://bucket.s3.amazonaws.com/key"), + ) yield dict( - region='unknown', bucket='bucket', key='key', + region="unknown", + bucket="bucket", + key="key", s3_config=us_east_1_regional_endpoint, - expected_url=( - 'https://bucket.s3.unknown.amazonaws.com/key')) + expected_url=("https://bucket.s3.unknown.amazonaws.com/key"), + ) yield dict( - region='us-east-1', bucket='bucket', key='key', + region="us-east-1", + bucket="bucket", + key="key", s3_config={ - 'us_east_1_regional_endpoint': 'regional', - 'use_dualstack_endpoint': True, + "us_east_1_regional_endpoint": "regional", + "use_dualstack_endpoint": True, }, - expected_url=( - 'https://bucket.s3.dualstack.us-east-1.amazonaws.com/key')) + expected_url=("https://bucket.s3.dualstack.us-east-1.amazonaws.com/key"), + ) yield dict( - region='us-east-1', bucket='bucket', key='key', + region="us-east-1", + bucket="bucket", + key="key", s3_config={ - 'us_east_1_regional_endpoint': 'regional', 
- 'use_accelerate_endpoint': True, + "us_east_1_regional_endpoint": "regional", + "use_accelerate_endpoint": True, }, - expected_url=( - 'https://bucket.s3-accelerate.amazonaws.com/key')) + expected_url=("https://bucket.s3-accelerate.amazonaws.com/key"), + ) yield dict( - region='us-east-1', bucket='bucket', key='key', + region="us-east-1", + bucket="bucket", + key="key", s3_config={ - 'us_east_1_regional_endpoint': 'regional', - 'use_accelerate_endpoint': True, - 'use_dualstack_endpoint': True, + "us_east_1_regional_endpoint": "regional", + "use_accelerate_endpoint": True, + "use_dualstack_endpoint": True, }, - expected_url=( - 'https://bucket.s3-accelerate.dualstack.amazonaws.com/key')) + expected_url=("https://bucket.s3-accelerate.dualstack.amazonaws.com/key"), + ) # Use us-east-1 regional endpoindicts: legacy - us_east_1_regional_endpoint_legacy = { - 'us_east_1_regional_endpoint': 'legacy' - } + us_east_1_regional_endpoint_legacy = {"us_east_1_regional_endpoint": "legacy"} yield dict( - region='us-east-1', bucket='bucket', key='key', + region="us-east-1", + bucket="bucket", + key="key", s3_config=us_east_1_regional_endpoint_legacy, - expected_url=( - 'https://bucket.s3.amazonaws.com/key')) + expected_url=("https://bucket.s3.amazonaws.com/key"), + ) yield dict( - region=None, bucket='bucket', key='key', + region=None, + bucket="bucket", + key="key", s3_config=us_east_1_regional_endpoint_legacy, - expected_url=( - 'https://bucket.s3.amazonaws.com/key')) + expected_url=("https://bucket.s3.amazonaws.com/key"), + ) yield dict( - region='unknown', bucket='bucket', key='key', + region="unknown", + bucket="bucket", + key="key", s3_config=us_east_1_regional_endpoint_legacy, - expected_url=( - 'https://bucket.s3.unknown.amazonaws.com/key')) + expected_url=("https://bucket.s3.unknown.amazonaws.com/key"), + ) s3_object_lambda_arn_gov = ( - 'arn:aws-us-gov:s3-object-lambda:us-gov-west-1:' - '123456789012:accesspoint:mybanner' + 
"arn:aws-us-gov:s3-object-lambda:us-gov-west-1:" + "123456789012:accesspoint:mybanner" ) yield dict( - region='fips-us-gov-west-1', bucket=s3_object_lambda_arn_gov, key='key', + region="fips-us-gov-west-1", + bucket=s3_object_lambda_arn_gov, + key="key", expected_url=( - 'https://mybanner-123456789012.s3-object-lambda-fips.' - 'us-gov-west-1.amazonaws.com/key' - ) + "https://mybanner-123456789012.s3-object-lambda-fips." + "us-gov-west-1.amazonaws.com/key" + ), ) s3_object_lambda_arn = ( - 'arn:aws:s3-object-lambda:us-east-1:' - '123456789012:accesspoint:mybanner' + "arn:aws:s3-object-lambda:us-east-1:" "123456789012:accesspoint:mybanner" ) yield dict( - region='aws-global', bucket=s3_object_lambda_arn, key='key', - s3_config={'use_arn_region': True}, + region="aws-global", + bucket=s3_object_lambda_arn, + key="key", + s3_config={"use_arn_region": True}, expected_url=( - 'https://mybanner-123456789012.s3-object-lambda.' - 'us-east-1.amazonaws.com/key' - ) + "https://mybanner-123456789012.s3-object-lambda." 
+ "us-east-1.amazonaws.com/key" + ), ) @@ -2548,167 +2654,248 @@ def test_correct_url_used_for_s3(test_case): def _verify_expected_endpoint_url( - region=None, bucket='bucket', key='key', s3_config=None, is_secure=True, - customer_provided_endpoint=None, expected_url=None, signature_version=None + region=None, + bucket="bucket", + key="key", + s3_config=None, + is_secure=True, + customer_provided_endpoint=None, + expected_url=None, + signature_version=None, ): environ = {} - with mock.patch('os.environ', environ): - environ['AWS_ACCESS_KEY_ID'] = 'access_key' - environ['AWS_SECRET_ACCESS_KEY'] = 'secret_key' - environ['AWS_CONFIG_FILE'] = 'no-exist-foo' - environ['AWS_SHARED_CREDENTIALS_FILE'] = 'no-exist-foo' + with mock.patch("os.environ", environ): + environ["AWS_ACCESS_KEY_ID"] = "access_key" + environ["AWS_SECRET_ACCESS_KEY"] = "secret_key" + environ["AWS_CONFIG_FILE"] = "no-exist-foo" + environ["AWS_SHARED_CREDENTIALS_FILE"] = "no-exist-foo" session = create_session() - session.config_filename = 'no-exist-foo' - config = Config( - signature_version=signature_version, - s3=s3_config + session.config_filename = "no-exist-foo" + config = Config(signature_version=signature_version, s3=s3_config) + s3 = session.create_client( + "s3", + region_name=region, + use_ssl=is_secure, + config=config, + endpoint_url=customer_provided_endpoint, ) - s3 = session.create_client('s3', region_name=region, use_ssl=is_secure, - config=config, - endpoint_url=customer_provided_endpoint) with ClientHTTPStubber(s3) as http_stubber: http_stubber.add_response() - s3.put_object(Bucket=bucket, Key=key, Body=b'bar') + s3.put_object(Bucket=bucket, Key=key, Body=b"bar") assert http_stubber.requests[0].url == expected_url -def _create_s3_client(region, is_secure, endpoint_url, s3_config, - signature_version): +def _create_s3_client(region, is_secure, endpoint_url, s3_config, signature_version): environ = {} - with mock.patch('os.environ', environ): - environ['AWS_ACCESS_KEY_ID'] = 
'access_key' - environ['AWS_SECRET_ACCESS_KEY'] = 'secret_key' - environ['AWS_CONFIG_FILE'] = 'no-exist-foo' - environ['AWS_SHARED_CREDENTIALS_FILE'] = 'no-exist-foo' + with mock.patch("os.environ", environ): + environ["AWS_ACCESS_KEY_ID"] = "access_key" + environ["AWS_SECRET_ACCESS_KEY"] = "secret_key" + environ["AWS_CONFIG_FILE"] = "no-exist-foo" + environ["AWS_SHARED_CREDENTIALS_FILE"] = "no-exist-foo" session = create_session() - session.config_filename = 'no-exist-foo' - config = Config( - signature_version=signature_version, - s3=s3_config + session.config_filename = "no-exist-foo" + config = Config(signature_version=signature_version, s3=s3_config) + s3 = session.create_client( + "s3", + region_name=region, + use_ssl=is_secure, + config=config, + endpoint_url=endpoint_url, ) - s3 = session.create_client('s3', region_name=region, use_ssl=is_secure, - config=config, - endpoint_url=endpoint_url) return s3 - def _addressing_for_presigned_url_test_cases(): # us-east-1, or the "global" endpoint. A signature version of # None means the user doesn't have signature version configured. 
- yield dict(region='us-east-1', bucket='bucket', key='key', - signature_version=None, - expected_url='https://bucket.s3.amazonaws.com/key') - yield dict(region='us-east-1', bucket='bucket', key='key', - signature_version='s3', - expected_url='https://bucket.s3.amazonaws.com/key') - yield dict(region='us-east-1', bucket='bucket', key='key', - signature_version='s3v4', - expected_url='https://bucket.s3.amazonaws.com/key') - yield dict(region='us-east-1', bucket='bucket', key='key', - signature_version='s3v4', - s3_config={'addressing_style': 'path'}, - expected_url='https://s3.amazonaws.com/bucket/key') + yield dict( + region="us-east-1", + bucket="bucket", + key="key", + signature_version=None, + expected_url="https://bucket.s3.amazonaws.com/key", + ) + yield dict( + region="us-east-1", + bucket="bucket", + key="key", + signature_version="s3", + expected_url="https://bucket.s3.amazonaws.com/key", + ) + yield dict( + region="us-east-1", + bucket="bucket", + key="key", + signature_version="s3v4", + expected_url="https://bucket.s3.amazonaws.com/key", + ) + yield dict( + region="us-east-1", + bucket="bucket", + key="key", + signature_version="s3v4", + s3_config={"addressing_style": "path"}, + expected_url="https://s3.amazonaws.com/bucket/key", + ) # A region that supports both 's3' and 's3v4'. 
- yield dict(region='us-west-2', bucket='bucket', key='key', - signature_version=None, - expected_url='https://bucket.s3.amazonaws.com/key') - yield dict(region='us-west-2', bucket='bucket', key='key', - signature_version='s3', - expected_url='https://bucket.s3.amazonaws.com/key') - yield dict(region='us-west-2', bucket='bucket', key='key', - signature_version='s3v4', - expected_url='https://bucket.s3.amazonaws.com/key') - yield dict(region='us-west-2', bucket='bucket', key='key', - signature_version='s3v4', - s3_config={'addressing_style': 'path'}, - expected_url='https://s3.us-west-2.amazonaws.com/bucket/key') + yield dict( + region="us-west-2", + bucket="bucket", + key="key", + signature_version=None, + expected_url="https://bucket.s3.amazonaws.com/key", + ) + yield dict( + region="us-west-2", + bucket="bucket", + key="key", + signature_version="s3", + expected_url="https://bucket.s3.amazonaws.com/key", + ) + yield dict( + region="us-west-2", + bucket="bucket", + key="key", + signature_version="s3v4", + expected_url="https://bucket.s3.amazonaws.com/key", + ) + yield dict( + region="us-west-2", + bucket="bucket", + key="key", + signature_version="s3v4", + s3_config={"addressing_style": "path"}, + expected_url="https://s3.us-west-2.amazonaws.com/bucket/key", + ) # An 's3v4' only region. 
- yield dict(region='us-east-2', bucket='bucket', key='key', - signature_version=None, - expected_url='https://bucket.s3.amazonaws.com/key') - yield dict(region='us-east-2', bucket='bucket', key='key', - signature_version='s3', - expected_url='https://bucket.s3.amazonaws.com/key') - yield dict(region='us-east-2', bucket='bucket', key='key', - signature_version='s3v4', - expected_url='https://bucket.s3.amazonaws.com/key') - yield dict(region='us-east-2', bucket='bucket', key='key', - signature_version='s3v4', - s3_config={'addressing_style': 'path'}, - expected_url='https://s3.us-east-2.amazonaws.com/bucket/key') + yield dict( + region="us-east-2", + bucket="bucket", + key="key", + signature_version=None, + expected_url="https://bucket.s3.amazonaws.com/key", + ) + yield dict( + region="us-east-2", + bucket="bucket", + key="key", + signature_version="s3", + expected_url="https://bucket.s3.amazonaws.com/key", + ) + yield dict( + region="us-east-2", + bucket="bucket", + key="key", + signature_version="s3v4", + expected_url="https://bucket.s3.amazonaws.com/key", + ) + yield dict( + region="us-east-2", + bucket="bucket", + key="key", + signature_version="s3v4", + s3_config={"addressing_style": "path"}, + expected_url="https://s3.us-east-2.amazonaws.com/bucket/key", + ) # Dualstack endpoints yield dict( - region='us-west-2', bucket='bucket', key='key', + region="us-west-2", + bucket="bucket", + key="key", signature_version=None, - s3_config={'use_dualstack_endpoint': True}, - expected_url='https://bucket.s3.dualstack.us-west-2.amazonaws.com/key') + s3_config={"use_dualstack_endpoint": True}, + expected_url="https://bucket.s3.dualstack.us-west-2.amazonaws.com/key", + ) yield dict( - region='us-west-2', bucket='bucket', key='key', - signature_version='s3', - s3_config={'use_dualstack_endpoint': True}, - expected_url='https://bucket.s3.dualstack.us-west-2.amazonaws.com/key') + region="us-west-2", + bucket="bucket", + key="key", + signature_version="s3", + 
s3_config={"use_dualstack_endpoint": True}, + expected_url="https://bucket.s3.dualstack.us-west-2.amazonaws.com/key", + ) yield dict( - region='us-west-2', bucket='bucket', key='key', - signature_version='s3v4', - s3_config={'use_dualstack_endpoint': True}, - expected_url='https://bucket.s3.dualstack.us-west-2.amazonaws.com/key') + region="us-west-2", + bucket="bucket", + key="key", + signature_version="s3v4", + s3_config={"use_dualstack_endpoint": True}, + expected_url="https://bucket.s3.dualstack.us-west-2.amazonaws.com/key", + ) # Accelerate - yield dict(region='us-west-2', bucket='bucket', key='key', - signature_version=None, - s3_config={'use_accelerate_endpoint': True}, - expected_url='https://bucket.s3-accelerate.amazonaws.com/key') + yield dict( + region="us-west-2", + bucket="bucket", + key="key", + signature_version=None, + s3_config={"use_accelerate_endpoint": True}, + expected_url="https://bucket.s3-accelerate.amazonaws.com/key", + ) # A region that we don't know about. - yield dict(region='us-west-50', bucket='bucket', key='key', - signature_version=None, - expected_url='https://bucket.s3.amazonaws.com/key') + yield dict( + region="us-west-50", + bucket="bucket", + key="key", + signature_version=None, + expected_url="https://bucket.s3.amazonaws.com/key", + ) # Customer provided URL results in us leaving the host untouched. 
- yield dict(region='us-west-2', bucket='bucket', key='key', - signature_version=None, - customer_provided_endpoint='https://foo.com/', - expected_url='https://foo.com/bucket/key') + yield dict( + region="us-west-2", + bucket="bucket", + key="key", + signature_version=None, + customer_provided_endpoint="https://foo.com/", + expected_url="https://foo.com/bucket/key", + ) # Access-point - accesspoint_arn = ( - 'arn:aws:s3:us-west-2:123456789012:accesspoint:myendpoint' - ) + accesspoint_arn = "arn:aws:s3:us-west-2:123456789012:accesspoint:myendpoint" yield dict( - region='us-west-2', bucket=accesspoint_arn, key='key', + region="us-west-2", + bucket=accesspoint_arn, + key="key", expected_url=( - 'https://myendpoint-123456789012.s3-accesspoint.' - 'us-west-2.amazonaws.com/key' - ) + "https://myendpoint-123456789012.s3-accesspoint." + "us-west-2.amazonaws.com/key" + ), ) yield dict( - region='us-east-1', bucket=accesspoint_arn, key='key', - s3_config={'use_arn_region': False}, + region="us-east-1", + bucket=accesspoint_arn, + key="key", + s3_config={"use_arn_region": False}, expected_url=( - 'https://myendpoint-123456789012.s3-accesspoint.' - 'us-east-1.amazonaws.com/key' - ) + "https://myendpoint-123456789012.s3-accesspoint." 
+ "us-east-1.amazonaws.com/key" + ), ) # Use us-east-1 regional endpoint configuration cases - us_east_1_regional_endpoint = { - 'us_east_1_regional_endpoint': 'regional' - } + us_east_1_regional_endpoint = {"us_east_1_regional_endpoint": "regional"} yield dict( - region='us-east-1', bucket='bucket', key='key', - s3_config=us_east_1_regional_endpoint, signature_version='s3', - expected_url=( - 'https://bucket.s3.us-east-1.amazonaws.com/key')) + region="us-east-1", + bucket="bucket", + key="key", + s3_config=us_east_1_regional_endpoint, + signature_version="s3", + expected_url=("https://bucket.s3.us-east-1.amazonaws.com/key"), + ) yield dict( - region='us-east-1', bucket='bucket', key='key', - s3_config=us_east_1_regional_endpoint, signature_version='s3v4', - expected_url=( - 'https://bucket.s3.us-east-1.amazonaws.com/key')) + region="us-east-1", + bucket="bucket", + key="key", + s3_config=us_east_1_regional_endpoint, + signature_version="s3v4", + expected_url=("https://bucket.s3.us-east-1.amazonaws.com/key"), + ) @pytest.mark.parametrize("test_case", _addressing_for_presigned_url_test_cases()) @@ -2719,19 +2906,27 @@ def test_addressing_for_presigned_urls(test_case): def _verify_presigned_url_addressing( - region=None, bucket='bucket', key='key', s3_config=None, is_secure=True, - customer_provided_endpoint=None, expected_url=None, signature_version=None + region=None, + bucket="bucket", + key="key", + s3_config=None, + is_secure=True, + customer_provided_endpoint=None, + expected_url=None, + signature_version=None, ): - s3 = _create_s3_client(region=region, is_secure=is_secure, - endpoint_url=customer_provided_endpoint, - s3_config=s3_config, - signature_version=signature_version) - url = s3.generate_presigned_url( - 'get_object', {'Bucket': bucket, 'Key': key}) + s3 = _create_s3_client( + region=region, + is_secure=is_secure, + endpoint_url=customer_provided_endpoint, + s3_config=s3_config, + signature_version=signature_version, + ) + url = 
s3.generate_presigned_url("get_object", {"Bucket": bucket, "Key": key}) # We're not trying to verify the params for URL presigning, # those are tested elsewhere. We just care about the hostname/path. parts = urlsplit(url) - actual = '%s://%s%s' % parts[:3] + actual = "%s://%s%s" % parts[:3] assert actual == expected_url @@ -2739,35 +2934,35 @@ class TestS3XMLPayloadEscape(BaseS3OperationTest): def assert_correct_content_md5(self, request): content_md5_bytes = get_md5(request.body).digest() content_md5 = base64.b64encode(content_md5_bytes) - self.assertEqual(content_md5, request.headers['Content-MD5']) + self.assertEqual(content_md5, request.headers["Content-MD5"]) def test_escape_keys_in_xml_delete_objects(self): self.http_stubber.add_response() with self.http_stubber: - response = self.client.delete_objects( - Bucket='mybucket', - Delete={ - 'Objects': [{'Key': 'some\r\n\rkey'}] - }, + self.client.delete_objects( + Bucket="mybucket", + Delete={"Objects": [{"Key": "some\r\n\rkey"}]}, ) request = self.http_stubber.requests[0] - self.assertNotIn(b'\r\n\r', request.body) - self.assertIn(b' ', request.body) + self.assertNotIn(b"\r\n\r", request.body) + self.assertIn(b" ", request.body) self.assert_correct_content_md5(request) def test_escape_keys_in_xml_put_bucket_lifecycle_configuration(self): self.http_stubber.add_response() with self.http_stubber: - response = self.client.put_bucket_lifecycle_configuration( - Bucket='mybucket', + self.client.put_bucket_lifecycle_configuration( + Bucket="mybucket", LifecycleConfiguration={ - 'Rules': [{ - 'Prefix': 'my\r\n\rprefix', - 'Status': 'ENABLED', - }] - } + "Rules": [ + { + "Prefix": "my\r\n\rprefix", + "Status": "ENABLED", + } + ] + }, ) request = self.http_stubber.requests[0] - self.assertNotIn(b'my\r\n\rprefix', request.body) - self.assertIn(b'my prefix', request.body) + self.assertNotIn(b"my\r\n\rprefix", request.body) + self.assertIn(b"my prefix", request.body) self.assert_correct_content_md5(request) diff --git 
a/tests/functional/test_s3_control.py b/tests/functional/test_s3_control.py index e45a0577bc..afb7e171d9 100644 --- a/tests/functional/test_s3_control.py +++ b/tests/functional/test_s3_control.py @@ -10,7 +10,7 @@ # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. -from tests import unittest, mock, BaseSessionTest, create_session +from tests import mock, BaseSessionTest from botocore.config import Config from botocore.awsrequest import AWSResponse diff --git a/tests/functional/test_s3_control_redirects.py b/tests/functional/test_s3_control_redirects.py index 13e3175f92..aa95c50c44 100644 --- a/tests/functional/test_s3_control_redirects.py +++ b/tests/functional/test_s3_control_redirects.py @@ -15,7 +15,7 @@ import pytest -from tests import unittest, mock, BaseSessionTest, ClientHTTPStubber +from tests import unittest, ClientHTTPStubber from botocore import exceptions from botocore.exceptions import ( @@ -27,7 +27,6 @@ from botocore.session import Session from botocore.compat import urlsplit from botocore.config import Config -from botocore.awsrequest import AWSResponse ACCESSPOINT_ARN_TEST_CASES = [ @@ -98,14 +97,14 @@ 'exception': 'UnsupportedS3ControlConfigurationError', } }, - #{ + # { # 'arn': 'arn:aws-us-gov:s3-outposts:fips-us-gov-east-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint', # 'region': 'fips-us-gov-east-1', # 'config': {'s3': {'use_arn_region': True}}, # 'assertions': { # 'exception': 'UnsupportedS3ArnError', # } - #}, + # }, { 'arn': 'arn:aws-us-gov:s3-outposts:us-gov-east-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint', 'region': 'us-gov-east-1-fips', diff --git a/tests/functional/test_session.py b/tests/functional/test_session.py index 6c098be83a..16327bf815 100644 --- a/tests/functional/test_session.py +++ b/tests/functional/test_session.py @@ 
-33,9 +33,9 @@ def test_profile_precedence(self): def test_credentials_with_profile_precedence(self): self.environ['AWS_PROFILE'] = 'from_env_var' - self.session.set_config_variable('profile', 'from_session_instance') + self.session.set_config_variable('profile', 'from_session_instance') try: - creds = self.session.get_credentials() + self.session.get_credentials() except ProfileNotFound as e: self.assertNotIn('from_env_var', str(e)) self.assertIn('from_session_instance', str(e)) diff --git a/tests/functional/test_six_imports.py b/tests/functional/test_six_imports.py index 1151f08509..bbf6443041 100644 --- a/tests/functional/test_six_imports.py +++ b/tests/functional/test_six_imports.py @@ -24,7 +24,7 @@ def test_no_bare_six_imports(filename): with open(filename) as f: contents = f.read() parsed = ast.parse(contents, filename) - checker = SixImportChecker(filename).visit(parsed) + SixImportChecker(filename).visit(parsed) class SixImportChecker(ast.NodeVisitor): diff --git a/tests/functional/test_six_threading.py b/tests/functional/test_six_threading.py index 30f4866b9c..6db896aef7 100644 --- a/tests/functional/test_six_threading.py +++ b/tests/functional/test_six_threading.py @@ -24,11 +24,8 @@ def _reload_six(): # Issue #98 is caused by a race condition in six._LazyDescr.__get__ # which is only called once per moved module. Reload six so all the # moved modules are reset. 
- if sys.version_info < (3, 0): - reload(six) - else: - import importlib - importlib.reload(six) + import importlib + importlib.reload(six) class _ExampleThread(threading.Thread): diff --git a/tests/functional/test_sts.py b/tests/functional/test_sts.py index 6ae7c7b281..2e0bdaec46 100644 --- a/tests/functional/test_sts.py +++ b/tests/functional/test_sts.py @@ -255,7 +255,8 @@ def test_client_for_unknown_region_with_legacy_configured(self): self.environ['AWS_STS_REGIONAL_ENDPOINTS'] = 'legacy' sts = self.create_sts_client('not-real') self.assert_request_sent( - sts,expected_url='https://sts.not-real.amazonaws.com/') + sts, expected_url='https://sts.not-real.amazonaws.com/' + ) def test_client_for_unknown_region_with_regional_configured(self): self.environ['AWS_STS_REGIONAL_ENDPOINTS'] = 'regional' diff --git a/tests/functional/test_stub.py b/tests/functional/test_stub.py index adedc70ac1..46aba3fe12 100644 --- a/tests/functional/test_stub.py +++ b/tests/functional/test_stub.py @@ -309,7 +309,8 @@ def test_can_stub_with_presign_url_mixed_in(self): } ) self.assertEqual( - url, 'https://s3.amazonaws.com/myotherbucket/myotherkey') + url, 'https://s3.amazonaws.com/myotherbucket/myotherkey' + ) actual_response = self.client.list_objects(**expected_params) self.assertEqual(desired_response, actual_response) self.stubber.assert_no_pending_responses() @@ -326,8 +327,7 @@ def test_parse_get_bucket_location(self): def test_parse_get_bucket_location_returns_response(self): service_response = {"LocationConstraint": "us-west-2"} - self.stubber.add_response('get_bucket_location',service_response) + self.stubber.add_response('get_bucket_location', service_response) self.stubber.activate() response = self.client.get_bucket_location(Bucket='foo') self.assertEqual(response, service_response) - diff --git a/tests/functional/test_utils.py b/tests/functional/test_utils.py index 0f0b3e85e4..2cbec4d3bd 100644 --- a/tests/functional/test_utils.py +++ b/tests/functional/test_utils.py @@ 
-15,8 +15,9 @@ import shutil from tests import unittest, mock -from botocore.exceptions import (ConnectionClosedError, HTTPClientError, - InvalidIMDSEndpointError) +from botocore.exceptions import ( + ConnectionClosedError, HTTPClientError, InvalidIMDSEndpointError +) from botocore.utils import FileWebIdentityTokenLoader, InstanceMetadataFetcher from urllib3.exceptions import LocationParseError diff --git a/tests/functional/test_waiter_config.py b/tests/functional/test_waiter_config.py index d2b65d9ab6..b1705463cf 100644 --- a/tests/functional/test_waiter_config.py +++ b/tests/functional/test_waiter_config.py @@ -89,7 +89,6 @@ def _waiter_configs(): validator = Draft4Validator(WAITER_SCHEMA) for service_name in session.get_available_services(): client = session.create_client(service_name, 'us-east-1') - service_model = client.meta.service_model try: # We use the loader directly here because we need the entire # json document, not just the portions exposed (either @@ -164,14 +163,16 @@ def _validate_acceptor(acceptor, op_model, waiter_name): # check a few things about this returned search result. 
search_result = _search_jmespath_expression(expression, op_model) if search_result is None: - raise AssertionError("JMESPath expression did not match " - "anything for waiter '%s': %s" - % (waiter_name, expression)) + raise AssertionError( + f"JMESPath expression did not match anything for waiter " + f"'{waiter_name}': {expression}" + ) if acceptor.matcher in ['pathAll', 'pathAny']: - assert isinstance(search_result, list), \ - ("Attempted to use '%s' matcher in waiter '%s' " - "with non list result in JMESPath expression: %s" - % (acceptor.matcher, waiter_name, expression)) + assert isinstance(search_result, list), ( + f"Attempted to use '{acceptor.matcher}' matcher in waiter " + f"'{waiter_name}' with non list result in JMESPath expression: " + f"{expression}" + ) def _search_jmespath_expression(expression, op_model): diff --git a/tests/integration/test_client.py b/tests/integration/test_client.py index 7f6167e1cf..d246ad9d6e 100644 --- a/tests/integration/test_client.py +++ b/tests/integration/test_client.py @@ -12,7 +12,7 @@ # language governing permissions and limitations under the License. 
import logging import datetime -from tests import unittest, random_chars +from tests import unittest import botocore.session from botocore.client import ClientError diff --git a/tests/integration/test_client_http.py b/tests/integration/test_client_http.py index a561e82e6c..7bf3534522 100644 --- a/tests/integration/test_client_http.py +++ b/tests/integration/test_client_http.py @@ -95,7 +95,6 @@ def do_CONNECT(self): finally: self.environ_patch.stop() - def _read_timeout_server(self): config = Config( read_timeout=0.1, diff --git a/tests/integration/test_credentials.py b/tests/integration/test_credentials.py index ee27ca9d9b..cf1a9ca319 100644 --- a/tests/integration/test_credentials.py +++ b/tests/integration/test_credentials.py @@ -67,9 +67,9 @@ def test_access_secret_vs_profile_code(self, credentials_cls): # If all three are given, then the access/secret keys should # take precedence. s = self.create_session(profile='test') - - client = s.create_client('s3', aws_access_key_id='code', - aws_secret_access_key='code-secret') + s.create_client( + 's3', aws_access_key_id='code', aws_secret_access_key='code-secret' + ) credentials_cls.assert_called_with( access_key='code', secret_key='code-secret', token=mock.ANY) @@ -79,7 +79,6 @@ def test_profile_env_vs_code(self): # then the one set by code should take precedence. 
os.environ['AWS_DEFAULT_PROFILE'] = 'test' s = self.create_session(profile='default') - credentials = s.get_credentials() self.assertEqual(credentials.access_key, 'default') @@ -92,9 +91,9 @@ def test_access_secret_env_vs_code(self, credentials_cls): os.environ['AWS_ACCESS_KEY_ID'] = 'env' os.environ['AWS_SECRET_ACCESS_KEY'] = 'secret' s = self.create_session() - - client = s.create_client('s3', aws_access_key_id='code', - aws_secret_access_key='code-secret') + s.create_client( + 's3', aws_access_key_id='code', aws_secret_access_key='code-secret' + ) credentials_cls.assert_called_with( access_key='code', secret_key='code-secret', token=mock.ANY) diff --git a/tests/integration/test_elastictranscoder.py b/tests/integration/test_elastictranscoder.py index c84cca70be..b20cbb53b6 100644 --- a/tests/integration/test_elastictranscoder.py +++ b/tests/integration/test_elastictranscoder.py @@ -28,6 +28,7 @@ ]} """ + class TestElasticTranscoder(unittest.TestCase): def setUp(self): self.session = botocore.session.get_session() diff --git a/tests/integration/test_emr.py b/tests/integration/test_emr.py index b9338a76c3..b215aad8fa 100644 --- a/tests/integration/test_emr.py +++ b/tests/integration/test_emr.py @@ -23,6 +23,7 @@ def botocore_session(): return botocore.session.get_session() + @pytest.mark.parametrize( "region", [ diff --git a/tests/integration/test_glacier.py b/tests/integration/test_glacier.py index 23a7f348f5..af0e00e3b0 100644 --- a/tests/integration/test_glacier.py +++ b/tests/integration/test_glacier.py @@ -68,4 +68,3 @@ def test_can_upload_archive_from_bytes(self): if __name__ == '__main__': unittest.main() - diff --git a/tests/integration/test_s3.py b/tests/integration/test_s3.py index 1b61f5cf09..ff9f45843e 100644 --- a/tests/integration/test_s3.py +++ b/tests/integration/test_s3.py @@ -12,7 +12,7 @@ # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. 
from tests import ( - mock, unittest, temporary_file, random_chars, + unittest, temporary_file, random_chars, ClientHTTPStubber, ConsistencyWaiter, ) import os @@ -28,7 +28,6 @@ import pytest import urllib3 -from botocore.endpoint import Endpoint from botocore.exceptions import ConnectionClosedError from botocore.compat import six, zip_longest, OrderedDict import botocore.session @@ -503,8 +502,9 @@ def test_unicode_system_character_with_list_v2(self): self.assertEqual(len(parsed['Contents']), 1) self.assertEqual(parsed['Contents'][0]['Key'], key_name) - parsed = self.client.list_objects_v2(Bucket=self.bucket_name, - EncodingType='url') + parsed = self.client.list_objects_v2( + Bucket=self.bucket_name, EncodingType='url' + ) self.assertEqual(len(parsed['Contents']), 1) self.assertEqual(parsed['Contents'][0]['Key'], 'foo%08') @@ -518,8 +518,9 @@ def test_unicode_system_character_with_list_object_versions(self): self.assertEqual(len(parsed['Versions']), 1) self.assertEqual(parsed['Versions'][0]['Key'], key_name) - parsed = self.client.list_object_versions(Bucket=self.bucket_name, - EncodingType='url') + parsed = self.client.list_object_versions( + Bucket=self.bucket_name, EncodingType='url' + ) self.assertEqual(len(parsed['Versions']), 1) self.assertEqual(parsed['Versions'][0]['Key'], 'foo%03') diff --git a/tests/integration/test_smoke.py b/tests/integration/test_smoke.py index 43177454da..186e2c95f4 100644 --- a/tests/integration/test_smoke.py +++ b/tests/integration/test_smoke.py @@ -17,11 +17,10 @@ import pytest -from tests import mock, ClientHTTPStubber +from tests import ClientHTTPStubber from botocore import xform_name import botocore.session from botocore.client import ClientError -from botocore.endpoint import Endpoint from botocore.exceptions import ConnectionClosedError @@ -30,84 +29,76 @@ # Empty params means that the operation will be called with no params. This is # used as a quick verification that we can successfully make calls to services. 
SMOKE_TESTS = { - 'acm': {'ListCertificates': {}}, - 'apigateway': {'GetRestApis': {}}, - 'application-autoscaling': { - 'DescribeScalableTargets': { - 'ServiceNamespace': 'ecs' - }}, - 'autoscaling': {'DescribeAccountLimits': {}, - 'DescribeAdjustmentTypes': {}}, - 'cloudformation': {'DescribeStacks': {}, - 'ListStacks': {}}, - 'cloudfront': {'ListDistributions': {}, - 'ListStreamingDistributions': {}}, - 'cloudhsmv2': {'DescribeBackups': {}}, - 'cloudsearch': {'DescribeDomains': {}, - 'ListDomainNames': {}}, - 'cloudtrail': {'DescribeTrails': {}}, - 'cloudwatch': {'ListMetrics': {}}, - 'codecommit': {'ListRepositories': {}}, - 'codedeploy': {'ListApplications': {}}, - 'codepipeline': {'ListActionTypes': {}}, - 'cognito-identity': {'ListIdentityPools': {'MaxResults': 1}}, - 'cognito-sync': {'ListIdentityPoolUsage': {}}, - 'config': {'DescribeDeliveryChannels': {}}, - 'datapipeline': {'ListPipelines': {}}, - 'devicefarm': {'ListProjects': {}}, - 'directconnect': {'DescribeConnections': {}}, - 'ds': {'DescribeDirectories': {}}, - 'dynamodb': {'ListTables': {}}, - 'dynamodbstreams': {'ListStreams': {}}, - 'ec2': {'DescribeRegions': {}, - 'DescribeInstances': {}}, - 'ecr': {'DescribeRepositories': {}}, - 'ecs': {'DescribeClusters': {}}, - 'elasticache': {'DescribeCacheClusters': {}}, - 'elasticbeanstalk': {'DescribeApplications': {}}, - 'elastictranscoder': {'ListPipelines': {}}, - 'elb': {'DescribeLoadBalancers': {}}, - 'emr': {'ListClusters': {}}, - 'es': {'ListDomainNames': {}}, - 'events': {'ListRules': {}}, - 'firehose': {'ListDeliveryStreams': {}}, - 'gamelift': {'ListBuilds': {}}, - 'glacier': {'ListVaults': {}}, - 'iam': {'ListUsers': {}}, - # Does not work with session credentials so - # importexport tests are not run. 
- #'importexport': {'ListJobs': {}}, - 'importexport': {}, - 'inspector': {'DescribeCrossAccountAccessRole': {}}, - 'iot': {'DescribeEndpoint': {}}, - 'kinesis': {'ListStreams': {}}, - 'kms': {'ListKeys': {}}, - 'lambda': {'ListFunctions': {}}, - 'logs': {'DescribeLogGroups': {}}, - 'opsworks': {'DescribeStacks': {}}, - 'rds': {'DescribeDBInstances': {}}, - 'redshift': {'DescribeClusters': {}}, - 'route53': {'ListHostedZones': {}}, - 'route53domains': {'ListDomains': {}}, - 's3': {'ListBuckets': {}}, - 'sdb': {'ListDomains': {}}, - 'ses': {'ListIdentities': {}}, - 'shield': {'GetSubscriptionState': {}}, - 'sns': {'ListTopics': {}}, - 'sqs': {'ListQueues': {}}, - 'ssm': {'ListDocuments': {}}, - 'storagegateway': {'ListGateways': {}}, - # sts tests would normally go here, but - # there aren't any calls you can make when - # using session credentials so we don't run any - # sts tests. - 'sts': {}, - #'sts': {'GetSessionToken': {}}, - # Subscription needed for support API calls. - 'support': {}, - 'swf': {'ListDomains': {'registrationStatus': 'REGISTERED'}}, - 'waf': {'ListWebACLs': {'Limit': 1}}, - 'workspaces': {'DescribeWorkspaces': {}}, + 'acm': {'ListCertificates': {}}, + 'apigateway': {'GetRestApis': {}}, + 'application-autoscaling': {'DescribeScalableTargets': {'ServiceNamespace': 'ecs'}}, + 'autoscaling': {'DescribeAccountLimits': {}, 'DescribeAdjustmentTypes': {}}, + 'cloudformation': {'DescribeStacks': {}, 'ListStacks': {}}, + 'cloudfront': {'ListDistributions': {}, 'ListStreamingDistributions': {}}, + 'cloudhsmv2': {'DescribeBackups': {}}, + 'cloudsearch': {'DescribeDomains': {}, 'ListDomainNames': {}}, + 'cloudtrail': {'DescribeTrails': {}}, + 'cloudwatch': {'ListMetrics': {}}, + 'codecommit': {'ListRepositories': {}}, + 'codedeploy': {'ListApplications': {}}, + 'codepipeline': {'ListActionTypes': {}}, + 'cognito-identity': {'ListIdentityPools': {'MaxResults': 1}}, + 'cognito-sync': {'ListIdentityPoolUsage': {}}, + 'config': {'DescribeDeliveryChannels': 
{}}, + 'datapipeline': {'ListPipelines': {}}, + 'devicefarm': {'ListProjects': {}}, + 'directconnect': {'DescribeConnections': {}}, + 'ds': {'DescribeDirectories': {}}, + 'dynamodb': {'ListTables': {}}, + 'dynamodbstreams': {'ListStreams': {}}, + 'ec2': {'DescribeRegions': {}, 'DescribeInstances': {}}, + 'ecr': {'DescribeRepositories': {}}, + 'ecs': {'DescribeClusters': {}}, + 'elasticache': {'DescribeCacheClusters': {}}, + 'elasticbeanstalk': {'DescribeApplications': {}}, + 'elastictranscoder': {'ListPipelines': {}}, + 'elb': {'DescribeLoadBalancers': {}}, + 'emr': {'ListClusters': {}}, + 'es': {'ListDomainNames': {}}, + 'events': {'ListRules': {}}, + 'firehose': {'ListDeliveryStreams': {}}, + 'gamelift': {'ListBuilds': {}}, + 'glacier': {'ListVaults': {}}, + 'iam': {'ListUsers': {}}, + # Does not work with session credentials so + # importexport tests are not run. + # 'importexport': {'ListJobs': {}}, + 'importexport': {}, + 'inspector': {'DescribeCrossAccountAccessRole': {}}, + 'iot': {'DescribeEndpoint': {}}, + 'kinesis': {'ListStreams': {}}, + 'kms': {'ListKeys': {}}, + 'lambda': {'ListFunctions': {}}, + 'logs': {'DescribeLogGroups': {}}, + 'opsworks': {'DescribeStacks': {}}, + 'rds': {'DescribeDBInstances': {}}, + 'redshift': {'DescribeClusters': {}}, + 'route53': {'ListHostedZones': {}}, + 'route53domains': {'ListDomains': {}}, + 's3': {'ListBuckets': {}}, + 'sdb': {'ListDomains': {}}, + 'ses': {'ListIdentities': {}}, + 'shield': {'GetSubscriptionState': {}}, + 'sns': {'ListTopics': {}}, + 'sqs': {'ListQueues': {}}, + 'ssm': {'ListDocuments': {}}, + 'storagegateway': {'ListGateways': {}}, + # sts tests would normally go here, but + # there aren't any calls you can make when + # using session credentials so we don't run any + # sts tests. + 'sts': {}, + # 'sts': {'GetSessionToken': {}}, + # Subscription needed for support API calls. 
+ 'support': {}, + 'swf': {'ListDomains': {'registrationStatus': 'REGISTERED'}}, + 'waf': {'ListWebACLs': {'Limit': 1}}, + 'workspaces': {'DescribeWorkspaces': {}}, } @@ -119,25 +110,32 @@ 'application-autoscaling': { 'DescribeScalableTargets': { 'ServiceNamespace': 'fake-service-namespace' - }}, - 'autoscaling': {'CreateLaunchConfiguration': { - 'LaunchConfigurationName': 'foo', - 'ImageId': 'ami-12345678', - 'InstanceType': 'm1.small', - }}, - 'cloudformation': {'CreateStack': { - 'StackName': 'fake', - 'TemplateURL': 'http://s3.amazonaws.com/foo/bar', - }}, + } + }, + 'autoscaling': { + 'CreateLaunchConfiguration': { + 'LaunchConfigurationName': 'foo', + 'ImageId': 'ami-12345678', + 'InstanceType': 'm1.small', + } + }, + 'cloudformation': { + 'CreateStack': { + 'StackName': 'fake', + 'TemplateURL': 'http://s3.amazonaws.com/foo/bar', + } + }, 'cloudfront': {'GetDistribution': {'Id': 'fake-id'}}, 'cloudhsmv2': {'ListTags': {'ResourceId': 'fake-id'}}, 'cloudsearch': {'DescribeIndexFields': {'DomainName': 'fakedomain'}}, 'cloudtrail': {'DeleteTrail': {'Name': 'fake-trail'}}, - 'cloudwatch': {'SetAlarmState': { - 'AlarmName': 'abc', - 'StateValue': 'mno', - 'StateReason': 'xyz', - }}, + 'cloudwatch': { + 'SetAlarmState': { + 'AlarmName': 'abc', + 'StateValue': 'mno', + 'StateReason': 'xyz', + } + }, 'logs': {'GetLogEvents': {'logGroupName': 'a', 'logStreamName': 'b'}}, 'codecommit': {'ListBranches': {'repositoryName': 'fake-repo'}}, 'codedeploy': {'GetDeployment': {'deploymentId': 'fake-id'}}, @@ -146,7 +144,7 @@ 'cognito-sync': {'DescribeIdentityPoolUsage': {'IdentityPoolId': 'fake'}}, 'config': { 'GetResourceConfigHistory': {'resourceType': '', 'resourceId': 'fake'}, - }, + }, 'datapipeline': {'GetPipelineDefinition': {'pipelineId': 'fake'}}, 'devicefarm': {'GetDevice': {'arn': 'arn:aws:devicefarm:REGION::device:f'}}, 'directconnect': {'DescribeConnections': {'connectionId': 'fake'}}, @@ -159,7 +157,7 @@ 'elasticache': {'DescribeCacheClusters': {'CacheClusterId': 
'fake'}}, 'elasticbeanstalk': { 'DescribeEnvironmentResources': {'EnvironmentId': 'x'}, - }, + }, 'elb': {'DescribeLoadBalancers': {'LoadBalancerNames': ['fake']}}, 'elastictranscoder': {'ReadJob': {'Id': 'fake'}}, 'emr': {'DescribeCluster': {'ClusterId': 'fake'}}, @@ -181,18 +179,20 @@ 'sns': { 'ConfirmSubscription': {'TopicArn': 'a', 'Token': 'b'}, 'Publish': {'Message': 'hello', 'TopicArn': 'fake'}, - }, + }, 'sqs': {'GetQueueUrl': {'QueueName': 'fake'}}, 'ssm': {'GetDocument': {'Name': 'fake'}}, 'storagegateway': {'ListVolumes': {'GatewayARN': 'x'*50}}, 'sts': {'GetFederationToken': {'Name': 'fake', 'Policy': 'fake'}}, - 'support': {'CreateCase': { - 'subject': 'x', - 'communicationBody': 'x', - 'categoryCode': 'x', - 'serviceCode': 'x', - 'severityCode': 'low', - }}, + 'support': { + 'CreateCase': { + 'subject': 'x', + 'communicationBody': 'x', + 'categoryCode': 'x', + 'serviceCode': 'x', + 'severityCode': 'low', + } + }, 'swf': {'DescribeDomain': {'name': 'fake'}}, 'waf': {'GetWebACL': {'WebACLId': 'fake'}}, 'workspaces': {'DescribeWorkspaces': {'DirectoryId': 'fake-directory-id'}}, @@ -287,7 +287,7 @@ def test_can_make_request_and_understand_errors_with_client( client = _get_client(botocore_session, service_name) method = getattr(client, xform_name(operation_name)) with pytest.raises(ClientError): - response = method(**kwargs) + method(**kwargs) @pytest.mark.parametrize("service_name, operation_name, kwargs", _smoke_tests()) @@ -300,7 +300,7 @@ def test_client_can_retry_request_properly( with ClientHTTPStubber(client, strict=False) as http_stubber: http_stubber.responses.append(exception) try: - response = operation(**kwargs) + operation(**kwargs) except ClientError as e: assert False, ('Request was not retried properly, ' 'received error:\n%s' % pformat(e)) diff --git a/tests/integration/test_sts.py b/tests/integration/test_sts.py index a597760135..2b18adde6a 100644 --- a/tests/integration/test_sts.py +++ b/tests/integration/test_sts.py @@ -15,6 +15,7 @@ 
import botocore.session from botocore.exceptions import ClientError + class TestSTS(unittest.TestCase): def setUp(self): self.session = botocore.session.get_session() @@ -38,5 +39,5 @@ def test_regionalized_endpoints(self): self.assertEqual(sts.meta.endpoint_url, 'https://sts.us-west-2.amazonaws.com') # Signing error will be thrown with the incorrect region name included. - with self.assertRaisesRegex(ClientError, 'ap-southeast-1') as e: + with self.assertRaisesRegex(ClientError, 'ap-southeast-1'): sts.get_session_token() diff --git a/tests/unit/auth/test_sigv4.py b/tests/unit/auth/test_sigv4.py index 87587083ab..92b0d915fb 100644 --- a/tests/unit/auth/test_sigv4.py +++ b/tests/unit/auth/test_sigv4.py @@ -30,11 +30,11 @@ import pytest -from tests import mock, FreezeTime +from tests import FreezeTime import botocore.auth from botocore.awsrequest import AWSRequest -from botocore.compat import six, urlsplit, parse_qsl, HAS_CRT +from botocore.compat import six, urlsplit, parse_qsl from botocore.credentials import Credentials @@ -153,21 +153,6 @@ def _test_signature_version_4(test_case): test_case.raw_request, 'authheader') -def _test_crt_signature_version_4(test_case): - test_case = SignatureTestCase(test_case) - request = create_request_from_raw_request(test_case.raw_request) - - # Use CRT logging to diagnose interim steps (canonical request, etc) - # import awscrt.io - # awscrt.io.init_logging(awscrt.io.LogLevel.Trace, 'stdout') - auth = botocore.crt.auth.CrtSigV4Auth(test_case.credentials, - SERVICE, REGION) - auth.add_auth(request) - actual_auth_header = request.headers['Authorization'] - assert_equal(actual_auth_header, test_case.authorization_header, - test_case.raw_request, 'authheader') - - def assert_equal(actual, expected, raw_request, part): if actual != expected: message = "The %s did not match" % part diff --git a/tests/unit/cfg/aws_config_nested_bad b/tests/unit/cfg/aws_config_nested_bad index ad178f8f09..f898780850 100644 --- 
a/tests/unit/cfg/aws_config_nested_bad +++ b/tests/unit/cfg/aws_config_nested_bad @@ -6,4 +6,3 @@ s3 = cloudwatch = signature_version = v4 region=us-west-2 - diff --git a/tests/unit/crt/auth/test_crt_signers.py b/tests/unit/crt/auth/test_crt_signers.py index 6c4de44134..81a53a09a4 100644 --- a/tests/unit/crt/auth/test_crt_signers.py +++ b/tests/unit/crt/auth/test_crt_signers.py @@ -1,5 +1,3 @@ -import unittest - import botocore from botocore.compat import HAS_CRT diff --git a/tests/unit/crt/auth/test_crt_sigv4.py b/tests/unit/crt/auth/test_crt_sigv4.py index db38566fb7..795175cd50 100644 --- a/tests/unit/crt/auth/test_crt_sigv4.py +++ b/tests/unit/crt/auth/test_crt_sigv4.py @@ -1,8 +1,6 @@ -import datetime - import pytest -from tests import mock, requires_crt, FreezeTime +from tests import requires_crt, FreezeTime from tests.unit.auth.test_sigv4 import ( DATE, SERVICE, diff --git a/tests/unit/docs/__init__.py b/tests/unit/docs/__init__.py index 8d7762282a..b71ab51f26 100644 --- a/tests/unit/docs/__init__.py +++ b/tests/unit/docs/__init__.py @@ -114,7 +114,7 @@ def _setup_models(self): 'type': 'string' } }, - 'documentation':'AWS MyService Description' + 'documentation': 'AWS MyService Description' } self.waiter_json_model = { diff --git a/tests/unit/docs/test_sharedexample.py b/tests/unit/docs/test_sharedexample.py index 0a84635403..067b36a3d2 100644 --- a/tests/unit/docs/test_sharedexample.py +++ b/tests/unit/docs/test_sharedexample.py @@ -41,7 +41,8 @@ def setUp(self): }) self.add_shape_to_params('foo', 'foo') self.add_shape_to_params('aloha', 'aloha') - self._examples = [{ + self._examples = [ + { "id": "sample-id", "title": "sample-title", "description": "Sample Description.", diff --git a/tests/unit/docs/test_utils.py b/tests/unit/docs/test_utils.py index b3dae2b739..2056460d32 100644 --- a/tests/unit/docs/test_utils.py +++ b/tests/unit/docs/test_utils.py @@ -224,4 +224,3 @@ class TestEscapeControls(unittest.TestCase): def test_escapes_controls(self): escaped 
= escape_controls('\na\rb\tc\fd\be') self.assertEqual(escaped, '\\na\\rb\\tc\\fd\\be') - diff --git a/tests/unit/put_object_data b/tests/unit/put_object_data index b1541d1071..eaa3e40f1c 100644 --- a/tests/unit/put_object_data +++ b/tests/unit/put_object_data @@ -1 +1 @@ -This is a test of PutObject. \ No newline at end of file +This is a test of PutObject. diff --git a/tests/unit/response_parsing/test_response_parsing.py b/tests/unit/response_parsing/test_response_parsing.py index 548c2a6664..2ac15b56e3 100644 --- a/tests/unit/response_parsing/test_response_parsing.py +++ b/tests/unit/response_parsing/test_response_parsing.py @@ -30,11 +30,11 @@ SPECIAL_CASES = [ - 'iam-get-user-policy.xml', # Needs the JSON decode from handlers.py + 'iam-get-user-policy.xml', # Needs the JSON decode from handlers.py 'iam-list-roles.xml', # Needs the JSON decode from handlers.py for the policy - 's3-get-bucket-location.xml', # Confirmed, this will need a special handler - #'s3-list-multipart-uploads.xml', # Bug in model, missing delimeter - 'cloudformation-get-template.xml', # Need to JSON decode the template body. + 's3-get-bucket-location.xml', # Confirmed, this will need a special handler + # 's3-list-multipart-uploads.xml', # Bug in model, missing delimeter + 'cloudformation-get-template.xml', # Need to JSON decode the template body. 
] @@ -154,7 +154,6 @@ def _get_operation_model(service_model, filename): for operation_name in operation_names: if xform_name(operation_name) == opname.replace('-', '_'): return service_model.operation_model(operation_name) - return operation def _get_expected_parsed_result(filename): diff --git a/tests/unit/retries/test_adaptive.py b/tests/unit/retries/test_adaptive.py index b3a2191b48..b038baa09f 100644 --- a/tests/unit/retries/test_adaptive.py +++ b/tests/unit/retries/test_adaptive.py @@ -87,6 +87,7 @@ def test_max_rate_cant_exceed_20_percent_max(self): rate_limiter.on_receiving_response() self.assertEqual(self.token_bucket.max_rate, 2.0 * 20) + class TestRateClocker(unittest.TestCase): def setUp(self): diff --git a/tests/unit/retries/test_special.py b/tests/unit/retries/test_special.py index ed3d8831ae..081f259b43 100644 --- a/tests/unit/retries/test_special.py +++ b/tests/unit/retries/test_special.py @@ -1,7 +1,6 @@ from tests import mock from tests import unittest -from botocore.compat import six from botocore.awsrequest import AWSResponse from botocore.retries import standard, special diff --git a/tests/unit/retries/test_standard.py b/tests/unit/retries/test_standard.py index b4bf276071..0a8016ba01 100644 --- a/tests/unit/retries/test_standard.py +++ b/tests/unit/retries/test_standard.py @@ -150,6 +150,7 @@ }, } + @pytest.mark.parametrize('case', RETRYABLE_TRANSIENT_ERRORS) def test_can_detect_retryable_transient_errors(case): transient_checker = standard.TransientRetryableChecker() @@ -170,7 +171,8 @@ def test_can_detect_modeled_retryable_errors(case): ) -@pytest.mark.parametrize('case', +@pytest.mark.parametrize( + 'case', [ case for case in RETRYABLE_TRANSIENT_ERRORS + @@ -208,7 +210,7 @@ def _verify_retryable(checker, operation_model, if isinstance(error, Exception): caught_exception = error else: - parsed_response = {'Error': {'Code': error, 'Message': 'Error'}} + parsed_response = {'Error': {'Code': error, 'Message': 'Error'}} context = 
standard.RetryContext( attempt_number=1, operation_model=operation_model, diff --git a/tests/unit/test_args.py b/tests/unit/test_args.py index f02595f1a5..b09c8cbd49 100644 --- a/tests/unit/test_args.py +++ b/tests/unit/test_args.py @@ -329,27 +329,27 @@ def test_can_merge_max_attempts(self): def test_uses_config_value_if_present_for_max_attempts(self): config = self.call_get_client_args( - client_config=Config(retries={'max_attempts': 2}) + client_config=Config(retries={'max_attempts': 2}) )['client_config'] self.assertEqual(config.retries['total_max_attempts'], 3) def test_uses_client_config_over_config_store_max_attempts(self): self.config_store.set_config_variable('max_attempts', 4) config = self.call_get_client_args( - client_config=Config(retries={'max_attempts': 2}) + client_config=Config(retries={'max_attempts': 2}) )['client_config'] self.assertEqual(config.retries['total_max_attempts'], 3) def test_uses_client_config_total_over_config_store_max_attempts(self): self.config_store.set_config_variable('max_attempts', 4) config = self.call_get_client_args( - client_config=Config(retries={'total_max_attempts': 2}) + client_config=Config(retries={'total_max_attempts': 2}) )['client_config'] self.assertEqual(config.retries['total_max_attempts'], 2) def test_max_attempts_unset_if_retries_is_none(self): config = self.call_get_client_args( - client_config=Config(retries=None) + client_config=Config(retries=None) )['client_config'] self.assertEqual(config.retries, {'mode': 'legacy'}) @@ -360,13 +360,13 @@ def test_retry_mode_set_on_config_store(self): def test_retry_mode_set_on_client_config(self): config = self.call_get_client_args( - client_config=Config(retries={'mode': 'standard'}) + client_config=Config(retries={'mode': 'standard'}) )['client_config'] self.assertEqual(config.retries['mode'], 'standard') def test_client_config_beats_config_store(self): self.config_store.set_config_variable('retry_mode', 'adaptive') config = self.call_get_client_args( - 
client_config=Config(retries={'mode': 'standard'}) + client_config=Config(retries={'mode': 'standard'}) )['client_config'] self.assertEqual(config.retries['mode'], 'standard') diff --git a/tests/unit/test_awsrequest.py b/tests/unit/test_awsrequest.py index 0e13c69001..c6e30837d1 100644 --- a/tests/unit/test_awsrequest.py +++ b/tests/unit/test_awsrequest.py @@ -19,12 +19,11 @@ import shutil import io import socket -import sys from urllib3.connectionpool import HTTPConnectionPool, HTTPSConnectionPool from botocore.exceptions import UnseekableStreamError -from botocore.awsrequest import AWSRequest, AWSPreparedRequest, AWSResponse +from botocore.awsrequest import AWSRequest, AWSResponse from botocore.awsrequest import AWSHTTPConnection, AWSHTTPSConnection, HeadersDict from botocore.awsrequest import prepare_request_dict, create_request_object from botocore.compat import file_type, six @@ -236,13 +235,18 @@ def test_duck_type_for_file_check(self): # we first need to determine if the thing is a file like object. # We should not be using an isinstance check. Instead, we should # be using duck type checks. + class LooksLikeFile(object): + def __init__(self): self.seek_called = False + def read(self, amount=None): pass + def seek(self, where): self.seek_called = True + looks_like_file = LooksLikeFile() self.prepared_request.body = looks_like_file self.prepared_request.reset_stream() @@ -504,8 +508,9 @@ def test_state_reset_on_connection_close(self): conn.sock = s wait_mock.return_value = True - conn.request('GET', '/bucket/foo', b'body', - {'Expect': b'100-continue'}) + conn.request( + 'GET', '/bucket/foo', b'body', {'Expect': b'100-continue'} + ) self.assertEqual(wait_mock.call_count, 1) response = conn.getresponse() self.assertEqual(response.status, 500) @@ -525,8 +530,9 @@ def test_state_reset_on_connection_close(self): # that was sent back. 
wait_mock.return_value = True - conn.request('GET', '/bucket/foo', b'body', - {'Expect': b'100-continue'}) + conn.request( + 'GET', '/bucket/foo', b'body', {'Expect': b'100-continue'} + ) # Assert that we waited for the 100-continue response self.assertEqual(wait_mock.call_count, 2) response = conn.getresponse() diff --git a/tests/unit/test_client.py b/tests/unit/test_client.py index 03f7f97af6..96350fe028 100644 --- a/tests/unit/test_client.py +++ b/tests/unit/test_client.py @@ -15,7 +15,6 @@ from tests import unittest import botocore -from botocore import utils from botocore import client from botocore.endpoint import DEFAULT_TIMEOUT from botocore import hooks @@ -347,7 +346,7 @@ def test_signing_region_does_not_change_client_region(self): 'hostname': 'endpoint.url', 'endpointName': 'us-west-2', 'signatureVersions': ['v4'], - 'credentialScope': {'region': credential_scope_region,} + 'credentialScope': {'region': credential_scope_region} } creator = self.create_client_creator() service_client = creator.create_client( @@ -387,7 +386,7 @@ def test_client_uses_signing_name_from_credential_scope(self): 'credentialScope': {'service': 'override'} } creator = self.create_client_creator() - service_client = creator.create_client( + creator.create_client( service_name='myservice', region_name='us-west-2', credentials=self.credentials) call_args = mock_signer.call_args @@ -914,7 +913,7 @@ def test_can_register_standard_retry_mode_from_config_store(self): ) creator = self.create_client_creator(config_store=config_store) with mock.patch('botocore.client.standard') as standard: - creator.create_client( 'myservice', 'us-west-2') + creator.create_client('myservice', 'us-west-2') self.assertTrue(standard.register_retry_handler.called) def test_try_to_paginate_non_paginated(self): @@ -1652,8 +1651,7 @@ def setUp(self): 'dnsSuffix': 'amazonaws.com', 'signatureVersions': ['s3', 's3v4'] } - self.resolver.construct_endpoint.return_value = \ - self.boilerplate_response + 
self.resolver.construct_endpoint.return_value = self.boilerplate_response def test_guesses_endpoint_as_last_resort(self): resolver = mock.Mock() @@ -1665,8 +1663,9 @@ def test_guesses_endpoint_as_last_resort(self): self.assertEqual('myservice', resolved['signing_name']) self.assertEqual('myservice', resolved['service_name']) self.assertEqual('v4', resolved['signature_version']) - self.assertEqual('https://myservice.guess.amazonaws.com', - resolved['endpoint_url']) + self.assertEqual( + 'https://myservice.guess.amazonaws.com', resolved['endpoint_url'] + ) def test_uses_us_east_1_by_default_for_s3(self): resolver = mock.Mock() @@ -1678,8 +1677,7 @@ def test_uses_us_east_1_by_default_for_s3(self): resolved = bridge.resolve('s3') self.assertEqual('us-east-1', resolved['region_name']) self.assertEqual('us-east-1', resolved['signing_region']) - self.assertEqual('https://s3.amazonaws.com', - resolved['endpoint_url']) + self.assertEqual('https://s3.amazonaws.com', resolved['endpoint_url']) def test_uses_region_from_client_config_if_available(self): resolver = mock.Mock() @@ -1738,16 +1736,6 @@ def test_can_create_http_urls(self): resolved = bridge.resolve('myservice', 'us-foo-baz', is_secure=False) self.assertEqual('http://host.com', resolved['endpoint_url']) - def test_can_create_http_urls(self): - resolver = mock.Mock() - resolver.construct_endpoint.return_value = { - 'partition': 'aws', 'hostname': 'host.com', - 'signatureVersions': ['v4'], - 'endpointName': 'us-foo-baz'} - bridge = ClientEndpointBridge(resolver) - resolved = bridge.resolve('myservice', 'us-foo-baz', is_secure=False) - self.assertEqual('http://host.com', resolved['endpoint_url']) - def test_credential_scope_overrides_signing_region(self): resolver = mock.Mock() resolver.construct_endpoint.return_value = { diff --git a/tests/unit/test_compat.py b/tests/unit/test_compat.py index f86eed69e3..d5e7c3fcd6 100644 --- a/tests/unit/test_compat.py +++ b/tests/unit/test_compat.py @@ -209,7 +209,7 @@ def 
test_get_tzinfo_options(self): class TestCRTIntegration(unittest.TestCase): def test_has_crt_global(self): try: - import awscrt.auth + import awscrt.auth # noqa assert HAS_CRT except ImportError: assert not HAS_CRT diff --git a/tests/unit/test_config_provider.py b/tests/unit/test_config_provider.py index c3ccc1416e..4e058b2465 100644 --- a/tests/unit/test_config_provider.py +++ b/tests/unit/test_config_provider.py @@ -15,7 +15,6 @@ from tests import mock from tests import unittest -import botocore import botocore.session as session from botocore.configprovider import ConfigValueStore from botocore.configprovider import BaseProvider diff --git a/tests/unit/test_configloader.py b/tests/unit/test_configloader.py index 1c4ff2e08e..0713ac7d45 100644 --- a/tests/unit/test_configloader.py +++ b/tests/unit/test_configloader.py @@ -59,7 +59,7 @@ def create_config_file(self, filename): def test_config_not_found(self): with self.assertRaises(botocore.exceptions.ConfigNotFound): - loaded_config = raw_config_parse(path('aws_config_notfound')) + raw_config_parse(path('aws_config_notfound')) def test_config_parse_error(self): filename = path('aws_config_bad') @@ -127,14 +127,14 @@ def test_nested_hierarchy_with_no_subsection_parsing(self): def test_nested_bad_config(self): filename = path('aws_config_nested_bad') with self.assertRaises(botocore.exceptions.ConfigParseError): - loaded_config = load_config(filename) + load_config(filename) def test_nested_bad_config_filesystem_encoding_none(self): filename = path('aws_config_nested_bad') with mock.patch('sys.getfilesystemencoding') as encoding: encoding.return_value = None with self.assertRaises(botocore.exceptions.ConfigParseError): - loaded_config = load_config(filename) + load_config(filename) def test_multi_file_load(self): filenames = [path('aws_config_other'), diff --git a/tests/unit/test_credentials.py b/tests/unit/test_credentials.py index 782bd7d224..8c34dea5d5 100644 --- a/tests/unit/test_credentials.py +++ 
b/tests/unit/test_credentials.py @@ -16,8 +16,6 @@ import os import tempfile import shutil -import json -import copy from dateutil.tz import tzlocal, tzutc @@ -2695,10 +2693,13 @@ def test_permissions_for_file_restricted(self): self.assertEqual(os.stat(filename).st_mode & 0xFFF, 0o600) def test_cache_with_custom_dumps_func(self): + def _custom_serializer(obj): return "custom foo" + def _custom_dumps(obj): return json.dumps(obj, default=_custom_serializer) + custom_dir = os.path.join(self.tempdir, 'custom') custom_cache = credentials.JSONFileCache( custom_dir, @@ -2875,15 +2876,14 @@ def test_http_error_propagated(self): 'AWS_CONTAINER_CREDENTIALS_RELATIVE_URI': '/latest/credentials?id=foo' } fetcher = mock.Mock(spec=credentials.ContainerMetadataFetcher) - timeobj = datetime.now(tzlocal()) - expired_timestamp = (timeobj - timedelta(hours=23)).isoformat() - future_timestamp = (timeobj + timedelta(hours=1)).isoformat() exception = botocore.exceptions.CredentialRetrievalError - fetcher.retrieve_full_uri.side_effect = exception(provider='ecs-role', - error_msg='fake http error') + fetcher.retrieve_full_uri.side_effect = exception( + provider='ecs-role', error_msg='fake http error' + ) + provider = credentials.ContainerProvider(environ, fetcher) + with self.assertRaises(exception): - provider = credentials.ContainerProvider(environ, fetcher) - creds = provider.load() + provider.load() def test_http_error_propagated_on_refresh(self): # We should ensure errors are still propagated even in the @@ -2910,7 +2910,7 @@ def test_http_error_propagated_on_refresh(self): creds = provider.load() # Second time with a refresh should propagate an error. 
with self.assertRaises(raised_exception): - frozen_creds = creds.get_frozen_credentials() + creds.get_frozen_credentials() def test_can_use_full_url(self): environ = { @@ -2970,8 +2970,9 @@ def setUp(self): spec=subprocess.Popen) def create_process_provider(self, profile_name='default'): - provider = ProcessProvider(profile_name, self.load_config, - popen=self.popen_mock) + provider = ProcessProvider( + profile_name, self.load_config, popen=self.popen_mock + ) return provider def _get_output(self, stdout, stderr=''): @@ -3312,7 +3313,7 @@ def test_raises_helpful_message_on_unauthorized_exception(self): ) with self.assertRaises(botocore.exceptions.UnauthorizedSSOTokenError): with self.stubber: - credentials = self.fetcher.fetch_credentials() + self.fetcher.fetch_credentials() class TestSSOProvider(unittest.TestCase): @@ -3335,7 +3336,7 @@ def setUp(self): 'sso_role_name': self.role_name, 'sso_account_id': self.account_id, } - self.expires_at = datetime.now(tzlocal()) + timedelta(hours=24) + self.expires_at = datetime.now(tzlocal()) + timedelta(hours=24) self.cached_creds_key = '048db75bbe50955c16af7aba6ff9c41a3131bb7e' self.cached_token_key = '13f9d35043871d073ab260e020f0ffde092cb14b' self.cache = { diff --git a/tests/unit/test_endpoint.py b/tests/unit/test_endpoint.py index 060a0e5bc4..34b2834e73 100644 --- a/tests/unit/test_endpoint.py +++ b/tests/unit/test_endpoint.py @@ -16,11 +16,8 @@ from tests import unittest from botocore.compat import six -from botocore.awsrequest import AWSRequest from botocore.endpoint import Endpoint, DEFAULT_TIMEOUT from botocore.endpoint import EndpointCreator -from botocore.exceptions import EndpointConnectionError -from botocore.exceptions import ConnectionClosedError from botocore.exceptions import HTTPClientError from botocore.httpsession import URLLib3Session from botocore.model import OperationModel, ServiceId @@ -297,7 +294,7 @@ def test_creates_endpoint_with_configured_url(self): self.assertEqual(endpoint.host, 
'https://endpoint.url') def test_create_endpoint_with_default_timeout(self): - endpoint = self.creator.create_endpoint( + self.creator.create_endpoint( self.service_model, region_name='us-west-2', endpoint_url='https://example.com', http_session_cls=self.mock_session) @@ -305,7 +302,7 @@ def test_create_endpoint_with_default_timeout(self): self.assertEqual(session_args.get('timeout'), DEFAULT_TIMEOUT) def test_create_endpoint_with_customized_timeout(self): - endpoint = self.creator.create_endpoint( + self.creator.create_endpoint( self.service_model, region_name='us-west-2', endpoint_url='https://example.com', timeout=123, http_session_cls=self.mock_session) @@ -313,7 +310,7 @@ def test_create_endpoint_with_customized_timeout(self): self.assertEqual(session_args.get('timeout'), 123) def test_get_endpoint_default_verify_ssl(self): - endpoint = self.creator.create_endpoint( + self.creator.create_endpoint( self.service_model, region_name='us-west-2', endpoint_url='https://example.com', http_session_cls=self.mock_session) @@ -321,7 +318,7 @@ def test_get_endpoint_default_verify_ssl(self): self.assertTrue(session_args.get('verify')) def test_verify_ssl_can_be_disabled(self): - endpoint = self.creator.create_endpoint( + self.creator.create_endpoint( self.service_model, region_name='us-west-2', endpoint_url='https://example.com', verify=False, http_session_cls=self.mock_session) @@ -329,7 +326,7 @@ def test_verify_ssl_can_be_disabled(self): self.assertFalse(session_args.get('verify')) def test_verify_ssl_can_specify_cert_bundle(self): - endpoint = self.creator.create_endpoint( + self.creator.create_endpoint( self.service_model, region_name='us-west-2', endpoint_url='https://example.com', verify='/path/cacerts.pem', http_session_cls=self.mock_session) @@ -338,7 +335,7 @@ def test_verify_ssl_can_specify_cert_bundle(self): def test_client_cert_can_specify_path(self): client_cert = '/some/path/cert' - endpoint = self.creator.create_endpoint( + self.creator.create_endpoint( 
self.service_model, region_name='us-west-2', endpoint_url='https://example.com', client_cert=client_cert, http_session_cls=self.mock_session) @@ -347,7 +344,7 @@ def test_client_cert_can_specify_path(self): def test_honor_cert_bundle_env_var(self): self.environ['REQUESTS_CA_BUNDLE'] = '/env/cacerts.pem' - endpoint = self.creator.create_endpoint( + self.creator.create_endpoint( self.service_model, region_name='us-west-2', endpoint_url='https://example.com', http_session_cls=self.mock_session) @@ -356,7 +353,7 @@ def test_honor_cert_bundle_env_var(self): def test_env_ignored_if_explicitly_passed(self): self.environ['REQUESTS_CA_BUNDLE'] = '/env/cacerts.pem' - endpoint = self.creator.create_endpoint( + self.creator.create_endpoint( self.service_model, region_name='us-west-2', endpoint_url='https://example.com', verify='/path/cacerts.pem', http_session_cls=self.mock_session) @@ -365,7 +362,7 @@ def test_env_ignored_if_explicitly_passed(self): self.assertEqual(session_args.get('verify'), '/path/cacerts.pem') def test_can_specify_max_pool_conns(self): - endpoint = self.creator.create_endpoint( + self.creator.create_endpoint( self.service_model, region_name='us-west-2', endpoint_url='https://example.com', max_pool_connections=100, diff --git a/tests/unit/test_eventstream.py b/tests/unit/test_eventstream.py index 678c9329b1..6ae00a41ee 100644 --- a/tests/unit/test_eventstream.py +++ b/tests/unit/test_eventstream.py @@ -302,10 +302,10 @@ def test_negative_cases(encoded, exception): def test_header_parser(): """Test that the header parser supports all header types. 
""" headers_data = ( - b"\x010\x00\x011\x01\x012\x02\x02\x013\x03\x00\x03" - b"\x014\x04\x00\x00\x00\x04\x015\x05\x00\x00\x00\x00\x00\x00\x00\x05" - b"\x016\x06\x00\x05bytes\x017\x07\x00\x04utf8" - b"\x018\x08\x00\x00\x00\x00\x00\x00\x00\x08\x019\x090123456789abcdef" + b"\x010\x00\x011\x01\x012\x02\x02\x013\x03\x00\x03" + b"\x014\x04\x00\x00\x00\x04\x015\x05\x00\x00\x00\x00\x00\x00\x00\x05" + b"\x016\x06\x00\x05bytes\x017\x07\x00\x04utf8" + b"\x018\x08\x00\x00\x00\x00\x00\x00\x00\x08\x019\x090123456789abcdef" ) expected_headers = { @@ -337,7 +337,7 @@ def test_message_prelude_properties(): def test_message_to_response_dict(): response_dict = PAYLOAD_ONE_STR_HEADER[1].to_response_dict() - assert response_dict['status_code'] ==200 + assert response_dict['status_code'] == 200 expected_headers = {'content-type': 'application/json'} assert response_dict['headers'] == expected_headers @@ -424,6 +424,7 @@ def test_unpack_prelude(): def create_mock_raw_stream(*data): raw_stream = mock.Mock() + def generator(): for chunk in data: yield chunk diff --git a/tests/unit/test_handlers.py b/tests/unit/test_handlers.py index f0b8bb111d..6d0eb6b41e 100644 --- a/tests/unit/test_handlers.py +++ b/tests/unit/test_handlers.py @@ -23,7 +23,6 @@ from botocore.compat import OrderedDict from botocore.exceptions import ParamValidationError, MD5UnavailableError from botocore.exceptions import AliasConflictParameterError -from botocore.exceptions import MissingServiceIdError from botocore.awsrequest import AWSRequest from botocore.compat import quote, six from botocore.config import Config @@ -31,10 +30,8 @@ from botocore.docs.params import RequestParamsDocumenter from botocore.docs.example import RequestExampleDocumenter from botocore.hooks import HierarchicalEmitter -from botocore.loaders import Loader from botocore.model import OperationModel, ServiceModel, ServiceId from botocore.model import DenormalizedStructureBuilder -from botocore.session import Session from botocore.signers import 
RequestSigner from botocore.credentials import Credentials from botocore.utils import conditionally_calculate_md5 @@ -1304,10 +1301,10 @@ def test_alias_parameter_in_documentation_request_params(self): self.sample_section ) contents = self.sample_section.flush_structure().decode('utf-8') - self.assertIn(':type ' + self.alias_name + ':', contents) - self.assertIn(':param ' + self.alias_name + ':', contents) - self.assertNotIn(':type ' + self.original_name + ':', contents) - self.assertNotIn(':param ' + self.original_name + ':', contents) + self.assertIn(':type ' + self.alias_name + ':', contents) + self.assertIn(':param ' + self.alias_name + ':', contents) + self.assertNotIn(':type ' + self.original_name + ':', contents) + self.assertNotIn(':param ' + self.original_name + ':', contents) def test_alias_parameter_in_documentation_request_example(self): RequestExampleDocumenter( @@ -1318,7 +1315,7 @@ def test_alias_parameter_in_documentation_request_example(self): self.sample_section ) contents = self.sample_section.flush_structure().decode('utf-8') - self.assertIn(self.alias_name + '=', contents) + self.assertIn(self.alias_name + '=', contents) self.assertNotIn(self.original_name + '=', contents) @@ -1366,10 +1363,8 @@ def test_does_prepend_to_host_with_more_components(self): def test_does_validate_long_host(self): with self.assertRaises(ParamValidationError): - self._prepend_to_host( - 'https://example.com/path', 'toolong'*100) + self._prepend_to_host('https://example.com/path', 'toolong' * 100) def test_does_validate_host_with_illegal_char(self): with self.assertRaises(ParamValidationError): - self._prepend_to_host( - 'https://example.com/path', 'host#name') + self._prepend_to_host('https://example.com/path', 'host#name') diff --git a/tests/unit/test_hooks.py b/tests/unit/test_hooks.py index 885686c269..c35a428300 100644 --- a/tests/unit/test_hooks.py +++ b/tests/unit/test_hooks.py @@ -567,10 +567,12 @@ def test_copy_emitter(self): # Here we're not testing copy 
directly, we're testing # the observable behavior from copying an event emitter. first = [] + def first_handler(id_name, **kwargs): first.append(id_name) second = [] + def second_handler(id_name, **kwargs): second.append(id_name) @@ -609,10 +611,12 @@ def test_copy_emitter_with_unique_id_event(self): # Here we're not testing copy directly, we're testing # the observable behavior from copying an event emitter. first = [] + def first_handler(id_name, **kwargs): first.append(id_name) second = [] + def second_handler(id_name, **kwargs): second.append(id_name) diff --git a/tests/unit/test_http_session.py b/tests/unit/test_http_session.py index 927805227f..d893fde100 100644 --- a/tests/unit/test_http_session.py +++ b/tests/unit/test_http_session.py @@ -5,7 +5,6 @@ from tests import mock, unittest -from botocore.vendored import six from botocore.awsrequest import AWSRequest from botocore.awsrequest import AWSHTTPConnectionPool, AWSHTTPSConnectionPool from botocore.httpsession import get_cert_path @@ -236,7 +235,7 @@ def test_https_proxy_scheme_tls_in_tls(self): def test_https_proxy_scheme_forwarding_https_url(self): proxies = {'https': 'https://proxy.com'} - proxies_config = {"proxy_use_forwarding_for_https": True} + proxies_config = {"proxy_use_forwarding_for_https": True} session = URLLib3Session(proxies=proxies, proxies_config=proxies_config) self.request.url = 'https://example.com/' session.send(self.request.prepare()) @@ -393,7 +392,7 @@ def make_request_with_error(self, error): def test_catches_new_connection_error(self): error = NewConnectionError(None, None) with pytest.raises(EndpointConnectionError): - self.make_request_with_error(error) + self.make_request_with_error(error) def test_catches_bad_status_line(self): error = ProtocolError(None) @@ -401,7 +400,7 @@ def test_catches_bad_status_line(self): self.make_request_with_error(error) def test_aws_connection_classes_are_used(self): - session = URLLib3Session() + session = URLLib3Session() # noqa # ensure the 
pool manager is using the correct classes http_class = self.pool_manager.pool_classes_by_scheme.get('http') self.assertIs(http_class, AWSHTTPConnectionPool) diff --git a/tests/unit/test_loaders.py b/tests/unit/test_loaders.py index 7afb52eb8c..c89e6be000 100644 --- a/tests/unit/test_loaders.py +++ b/tests/unit/test_loaders.py @@ -351,7 +351,6 @@ def loader_with_fake_dirs(self): with mock.patch('os.path.isdir', mock.Mock(return_value=True)): yield loader - def fake_listdir(self, dirname): parts = dirname.split(os.path.sep) result = self.fake_directories @@ -426,7 +425,7 @@ def test_determine_latest(self): }, } with self.loader_with_fake_dirs() as loader: - latest = loader.determine_latest_version('ec2', 'service-2') + loader.determine_latest_version('ec2', 'service-2') self.assertEqual(loader.determine_latest_version('ec2', 'service-2'), '2014-10-01') self.assertEqual(loader.determine_latest_version('ec2', 'service-1'), diff --git a/tests/unit/test_model.py b/tests/unit/test_model.py index 0fedbab87a..10565ca608 100644 --- a/tests/unit/test_model.py +++ b/tests/unit/test_model.py @@ -4,7 +4,6 @@ from botocore import model from botocore.compat import OrderedDict -from botocore.exceptions import MissingServiceIdError @pytest.mark.parametrize("property_name", ['api_version', 'protocol']) @@ -573,11 +572,15 @@ def test_deep_merge(self): # map_merged has a serialization as a member trait as well as # in the StrToStrMap. # The member trait should have precedence. - self.assertEqual(map_merged.serialization, - # member beats the definition. - {'name': 'Attribute', - # From the definition. - 'flattened': True,}) + self.assertEqual( + map_merged.serialization, + # member beats the definition. + { + 'name': 'Attribute', + # From the definition. + 'flattened': True, + } + ) # Ensure we don't merge/mutate the original dicts. 
self.assertEqual(map_merged.key.serialization['name'], 'Name') self.assertEqual(map_merged.value.serialization['name'], 'Value') @@ -696,7 +699,7 @@ def test_shape_type_structure(self): } }, 'passwordType': { - "type":"string", + "type": "string", } } resolver = model.ShapeResolver(shapes) @@ -740,10 +743,10 @@ def test_shape_metadata(self): } }, 'passwordType': { - "type":"string", - "min":1, - "max":128, - "sensitive":True + "type": "string", + "min": 1, + "max": 128, + "sensitive": True } } resolver = model.ShapeResolver(shapes) @@ -778,7 +781,7 @@ def test_error_shape_metadata(self): def test_shape_list(self): shapes = { 'mfaDeviceListType': { - "type":"list", + "type": "list", "member": {"shape": "MFADevice"}, }, 'MFADevice': { diff --git a/tests/unit/test_paginate.py b/tests/unit/test_paginate.py index 20406363a0..3ae5c1cf72 100644 --- a/tests/unit/test_paginate.py +++ b/tests/unit/test_paginate.py @@ -89,7 +89,7 @@ def test_get_paginator(self): def test_get_paginator_no_exists(self): with self.assertRaises(ValueError): - paginator_config = self.paginator_model.get_paginator('ListBars') + self.paginator_model.get_paginator('ListBars') class TestPagination(unittest.TestCase): @@ -1407,11 +1407,11 @@ def setUp(self): self.paginator = Paginator(self.method, self.paginate_config, self.model) def test_int_page_size(self): - res = list(self.paginator.paginate(PaginationConfig={'PageSize': 1})) + list(self.paginator.paginate(PaginationConfig={'PageSize': 1})) self.method.assert_called_with(MaxItems='1') def test_str_page_size(self): - res = list(self.paginator.paginate(PaginationConfig={'PageSize': '1'})) + list(self.paginator.paginate(PaginationConfig={'PageSize': '1'})) self.method.assert_called_with(MaxItems='1') diff --git a/tests/unit/test_parsers.py b/tests/unit/test_parsers.py index 29ea92ad5b..515c1bc174 100644 --- a/tests/unit/test_parsers.py +++ b/tests/unit/test_parsers.py @@ -401,7 +401,7 @@ def test_base_json_parser_handles_unknown_member(self): 
'OutputShape', { 'type': 'structure', - 'union':True, + 'union': True, 'members': { 'Str': { 'shape': 'StringType', @@ -443,7 +443,7 @@ def test_base_xml_parser_handles_unknown_member(self): 'OutputShape', { 'type': 'structure', - 'union':True, + 'union': True, 'resultWrapper': 'OperationNameResult', 'members': { 'Str': { @@ -464,7 +464,7 @@ def test_base_xml_parser_handles_unknown_member(self): 'SDK_UNKNOWN_MEMBER': { 'name': 'Foo' }, - 'ResponseMetadata':{ + 'ResponseMetadata': { 'RequestId': 'request-id', 'HTTPStatusCode': 200, 'HTTPHeaders': {} @@ -484,7 +484,7 @@ def test_parser_errors_out_when_multiple_members_set(self): 'OutputShape', { 'type': 'structure', - 'union':True, + 'union': True, 'members': { 'Foo': { 'shape': 'StringType', @@ -1268,7 +1268,7 @@ def test_can_parse_glacier_error_response(self): body = (b'{"code":"AccessDeniedException","type":"Client","message":' b'"Access denied"}') headers = { - 'x-amzn-requestid': 'request-id' + 'x-amzn-requestid': 'request-id' } parser = parsers.RestJSONParser() parsed = parser.parse( @@ -1285,7 +1285,7 @@ def test_can_parse_restjson_error_code(self): "message": "blah", "deletes": 0}''' headers = { - 'x-amzn-requestid': 'request-id' + 'x-amzn-requestid': 'request-id' } parser = parsers.RestJSONParser() parsed = parser.parse( @@ -1297,7 +1297,7 @@ def test_can_parse_with_case_insensitive_keys(self): body = (b'{"Code":"AccessDeniedException","type":"Client","Message":' b'"Access denied"}') headers = { - 'x-amzn-requestid': 'request-id' + 'x-amzn-requestid': 'request-id' } parser = parsers.RestJSONParser() parsed = parser.parse( @@ -1407,7 +1407,7 @@ def test_can_parse_route53_with_missing_message(self): # We should be able to handle this gracefully and still at least # populate a "Message" key so that consumers don't have to # conditionally check for this. - body = ( + body = ( '' ' ' ' Sender' @@ -1425,6 +1425,7 @@ def test_can_parse_route53_with_missing_message(self): # still populate an empty string. 
self.assertEqual(error['Message'], '') + def _generic_test_bodies(): generic_html_body = ( 'Http/1.1 Service Unavailable' @@ -1434,7 +1435,9 @@ def _generic_test_bodies(): return [generic_html_body, empty_body, none_body] -@pytest.mark.parametrize("parser, body", + +@pytest.mark.parametrize( + "parser, body", itertools.product( parsers.PROTOCOL_PARSERS.values(), _generic_test_bodies() diff --git a/tests/unit/test_protocols.py b/tests/unit/test_protocols.py index c33cfea9b1..9e5e232797 100644 --- a/tests/unit/test_protocols.py +++ b/tests/unit/test_protocols.py @@ -63,10 +63,13 @@ from botocore.compat import json, OrderedDict, urlsplit from botocore.eventstream import EventStream from botocore.model import ServiceModel, OperationModel -from botocore.serialize import EC2Serializer, QuerySerializer, \ - JSONSerializer, RestJSONSerializer, RestXMLSerializer -from botocore.parsers import QueryParser, JSONParser, \ - RestJSONParser, RestXMLParser, EC2QueryParser +from botocore.serialize import ( + EC2Serializer, QuerySerializer, JSONSerializer, RestJSONSerializer, + RestXMLSerializer, +) +from botocore.parsers import ( + QueryParser, JSONParser, RestJSONParser, RestXMLParser, EC2QueryParser, +) from botocore.utils import parse_timestamp, percent_encode_sequence from botocore.awsrequest import prepare_request_dict from calendar import timegm @@ -168,6 +171,7 @@ def __init__(self, data): def stream(self): yield self._data + @pytest.mark.parametrize( "json_description, case, basename", _compliance_tests(TestType.OUTPUT) diff --git a/tests/unit/test_regions.py b/tests/unit/test_regions.py index 65987407bf..6e81f57115 100644 --- a/tests/unit/test_regions.py +++ b/tests/unit/test_regions.py @@ -43,13 +43,11 @@ def _template(self): }, 's3': { 'defaults': { - 'sslCommonName': \ - '{service}.{region}.{dnsSuffix}' + 'sslCommonName': '{service}.{region}.{dnsSuffix}' }, 'endpoints': { 'us-foo': { - 'sslCommonName': \ - '{region}.{service}.{dnsSuffix}' + 'sslCommonName': 
'{region}.{service}.{dnsSuffix}' }, 'us-bar': {}, 'eu-baz': {'hostname': 'foo'} @@ -154,8 +152,9 @@ def test_returns_none_when_no_match(self): def test_constructs_regionalized_endpoints_for_exact_matches(self): resolver = regions.EndpointResolver(self._template()) result = resolver.construct_endpoint('not-regionalized', 'eu-baz') - self.assertEqual('not-regionalized.eu-baz.amazonaws.com', - result['hostname']) + self.assertEqual( + 'not-regionalized.eu-baz.amazonaws.com', result['hostname'] + ) self.assertEqual('aws', result['partition']) self.assertEqual('eu-baz', result['endpointName']) diff --git a/tests/unit/test_response.py b/tests/unit/test_response.py index d3531162f8..29afcd0be2 100644 --- a/tests/unit/test_response.py +++ b/tests/unit/test_response.py @@ -21,7 +21,7 @@ from botocore import response from botocore.compat import six from botocore.exceptions import IncompleteReadError, ReadTimeoutError -from botocore.awsrequest import AWSRequest, AWSResponse +from botocore.awsrequest import AWSResponse XMLBODY1 = (b'' b'AccessDenied' @@ -286,21 +286,29 @@ def test_get_response_nonstreaming_ng(self): self.assert_response_with_subset_metadata( response.get_response(operation_model, http_response)[1], - {u'Contents': [{u'ETag': '"00000000000000000000000000000000"', - u'Key': 'test.png', - u'LastModified': datetime.datetime(2014, 3, 1, 17, 6, 40, tzinfo=tzutc()), - u'Owner': {u'DisplayName': 'dummy', - u'ID': 'AAAAAAAAAAAAAAAAAAA'}, - u'Size': 6702, - u'StorageClass': 'STANDARD'}], - u'IsTruncated': False, - u'Marker': "", - u'MaxKeys': 1000, - u'Name': 'mybucket', - u'Prefix': "", - 'ResponseMetadata': { - 'RequestId': 'XXXXXXXXXXXXXXXX', - 'HostId': 'AAAAAAAAAAAAAAAAAAA', - 'HTTPStatusCode': 200, - }} + { + 'Contents': [ + { + 'ETag': '"00000000000000000000000000000000"', + 'Key': 'test.png', + 'LastModified': datetime.datetime(2014, 3, 1, 17, 6, 40, tzinfo=tzutc()), + 'Owner': { + 'DisplayName': 'dummy', + 'ID': 'AAAAAAAAAAAAAAAAAAA' + }, + 'Size': 6702, + 
'StorageClass': 'STANDARD' + } + ], + 'IsTruncated': False, + 'Marker': "", + 'MaxKeys': 1000, + 'Name': 'mybucket', + 'Prefix': "", + 'ResponseMetadata': { + 'RequestId': 'XXXXXXXXXXXXXXXX', + 'HostId': 'AAAAAAAAAAAAAAAAAAA', + 'HTTPStatusCode': 200, + } + } ) diff --git a/tests/unit/test_retryhandler.py b/tests/unit/test_retryhandler.py index 3770007195..20d5d1dcdb 100644 --- a/tests/unit/test_retryhandler.py +++ b/tests/unit/test_retryhandler.py @@ -211,8 +211,9 @@ def test_create_retry_handler_with_socket_errors(self): # But any other exception should be raised even if # attempts < max_attempts. with self.assertRaises(ValueError): - sleep_time = handler(response=None, attempts=1, - caught_exception=ValueError()) + sleep_time = handler( + response=None, attempts=1, caught_exception=ValueError() + ) def test_connection_timeouts_are_retried(self): # If a connection times out, we get a Timout exception @@ -253,7 +254,7 @@ class TestRetryHandler(unittest.TestCase): def test_action_tied_to_policy(self): # When a retry rule matches we should return the # amount of time to sleep, otherwise we should return None. 
- delay_function = retryhandler.create_exponential_delay_function( 1, 2) + delay_function = retryhandler.create_exponential_delay_function(1, 2) checker = retryhandler.HTTPStatusCodeChecker(500) handler = retryhandler.RetryHandler(checker, delay_function) response = (HTTP_500_RESPONSE, {}) @@ -268,7 +269,7 @@ def test_action_tied_to_policy(self): handler(response=response, attempts=4, caught_exception=None), 8) def test_none_response_when_no_matches(self): - delay_function = retryhandler.create_exponential_delay_function( 1, 2) + delay_function = retryhandler.create_exponential_delay_function(1, 2) checker = retryhandler.HTTPStatusCodeChecker(500) handler = retryhandler.RetryHandler(checker, delay_function) response = (HTTP_200_RESPONSE, {}) diff --git a/tests/unit/test_serialize.py b/tests/unit/test_serialize.py index 4f7cbe9ba6..c1d739c7be 100644 --- a/tests/unit/test_serialize.py +++ b/tests/unit/test_serialize.py @@ -21,7 +21,6 @@ from botocore import serialize from botocore.compat import six from botocore.exceptions import ParamValidationError -from botocore.serialize import SERIALIZERS class BaseModelWithBlob(unittest.TestCase): @@ -394,13 +393,17 @@ def test_instantiate_without_validation(self): self.assert_serialize_valid_parameter(request_serializer) except ParamValidationError as e: self.fail( - "Shouldn't fail serializing valid parameter without validation".format(e)) + "Shouldn't fail serializing valid parameter without " + "validation: {}".format(e) + ) try: self.assert_serialize_invalid_parameter(request_serializer) except ParamValidationError as e: self.fail( - "Shouldn't fail serializing invalid parameter without validation".format(e)) + "Shouldn't fail serializing invalid parameter without " + "validation: {}".format(e) + ) def test_instantiate_with_validation(self): request_serializer = serialize.create_serializer( @@ -409,7 +412,9 @@ def test_instantiate_with_validation(self): self.assert_serialize_valid_parameter(request_serializer) except 
ParamValidationError as e: self.fail( - "Shouldn't fail serializing valid parameter with validation".format(e)) + "Shouldn't fail serializing valid parameter with " + "validation: {}".format(e) + ) with self.assertRaises(ParamValidationError): self.assert_serialize_invalid_parameter(request_serializer) diff --git a/tests/unit/test_session.py b/tests/unit/test_session.py index 5431a069c1..38fd5a9838 100644 --- a/tests/unit/test_session.py +++ b/tests/unit/test_session.py @@ -676,10 +676,10 @@ def test_can_specify_multiple_versions_from_config(self, client_creator): del self.environ['FOO_PROFILE'] self.environ['FOO_CONFIG_FILE'] = f.name f.write('[default]\n') - f.write('foo_api_versions =\n' - ' myservice = %s\n' - ' myservice2 = %s\n' % ( - config_api_version, second_config_api_version) + f.write( + f'foo_api_versions =\n' + f' myservice = {config_api_version}\n' + f' myservice2 = {second_config_api_version}\n' ) f.flush() @@ -754,9 +754,10 @@ def assert_created_client_is_monitored(self, session): client.meta.events) def assert_monitoring_host_and_port(self, session, host, port): - with mock.patch('botocore.monitoring.SocketPublisher', - spec=True) as mock_publisher: - client = session.create_client('ec2', 'us-west-2') + with mock.patch( + 'botocore.monitoring.SocketPublisher', spec=True + ) as mock_publisher: + session.create_client('ec2', 'us-west-2') self.assertEqual(mock_publisher.call_count, 1) _, args, kwargs = mock_publisher.mock_calls[0] self.assertEqual(kwargs.get('host'), host) @@ -897,7 +898,7 @@ def test_new_session_with_unknown_region(self): def test_new_session_with_invalid_region(self): with self.assertRaises(botocore.exceptions.InvalidRegionError): - s3_client = self.session.create_client('s3', 'not.a.real#region') + self.session.create_client('s3', 'not.a.real#region') def test_new_session_with_none_region(self): s3_client = self.session.create_client('s3', region_name=None) diff --git a/tests/unit/test_session_legacy.py
b/tests/unit/test_session_legacy.py index 0c7d5f253d..77bdb772f6 100644 --- a/tests/unit/test_session_legacy.py +++ b/tests/unit/test_session_legacy.py @@ -659,10 +659,10 @@ def test_can_specify_multiple_versions_from_config(self, client_creator): self.environ['FOO_CONFIG_FILE'] = f.name self.session = create_session(session_vars=self.env_vars) f.write('[default]\n') - f.write('foo_api_versions =\n' - ' myservice = %s\n' - ' myservice2 = %s\n' % ( - config_api_version, second_config_api_version) + f.write( + f'foo_api_versions =\n' + f' myservice = {config_api_version}\n' + f' myservice2 = {second_config_api_version}\n' ) f.flush() diff --git a/tests/unit/test_signers.py b/tests/unit/test_signers.py index 24dcbadbdd..48fa238cad 100644 --- a/tests/unit/test_signers.py +++ b/tests/unit/test_signers.py @@ -322,10 +322,8 @@ def test_no_credentials_case_is_forwarded_to_signer(self): ServiceId('service_name'), 'region_name', 'signing_name', 'v4', self.credentials, self.emitter) auth_cls = mock.Mock() - with mock.patch.dict(botocore.auth.AUTH_TYPE_MAPS, - {'v4': auth_cls}): - auth = self.signer.get_auth_instance( - 'service_name', 'region_name', 'v4') + with mock.patch.dict(botocore.auth.AUTH_TYPE_MAPS, {'v4': auth_cls}): + self.signer.get_auth_instance('service_name', 'region_name', 'v4') auth_cls.assert_called_with( service_name='service_name', region_name='region_name', @@ -1004,4 +1002,3 @@ def test_custom_region(self): self.assertIn(region, result) # The hostname won't be changed even if a different region is specified self.assertIn(hostname, result) - diff --git a/tests/unit/test_stub.py b/tests/unit/test_stub.py index 9bcd3730eb..018d154863 100644 --- a/tests/unit/test_stub.py +++ b/tests/unit/test_stub.py @@ -14,7 +14,7 @@ from tests import unittest from botocore.stub import Stubber -from botocore.exceptions import ParamValidationError, StubResponseError, UnStubbedResponseError +from botocore.exceptions import ParamValidationError, UnStubbedResponseError from 
botocore.model import ServiceModel from botocore import hooks @@ -190,7 +190,6 @@ def test_get_client_error_with_extra_response_meta(self): self.assertIn('RequestId', actual_response_meta) self.assertEqual(actual_response_meta['RequestId'], "79104EXAMPLEB723") - def test_get_response_errors_with_no_stubs(self): self.stubber.activate() with self.assertRaises(UnStubbedResponseError): diff --git a/tests/unit/test_utils.py b/tests/unit/test_utils.py index 9e79c11101..8119d615a7 100644 --- a/tests/unit/test_utils.py +++ b/tests/unit/test_utils.py @@ -22,10 +22,9 @@ import botocore from botocore import xform_name -from botocore.compat import OrderedDict, json +from botocore.compat import json from botocore.compat import six from botocore.awsrequest import AWSRequest -from botocore.awsrequest import AWSResponse from botocore.exceptions import InvalidExpressionError, ConfigNotFound from botocore.exceptions import ClientError, ConnectionClosedError from botocore.exceptions import InvalidDNSNameError, MetadataRetrievalError @@ -38,7 +37,6 @@ from botocore.exceptions import UnsupportedOutpostResourceError from botocore.model import ServiceModel from botocore.model import OperationModel -from botocore.regions import EndpointResolver from botocore.utils import ensure_boolean from botocore.utils import resolve_imds_endpoint_mode from botocore.utils import is_json_value_header @@ -76,8 +74,6 @@ from botocore.utils import SSOTokenLoader from botocore.utils import is_valid_uri, is_valid_ipv6_endpoint_url from botocore.exceptions import SSOTokenLoadError -from botocore.utils import IMDSFetcher -from botocore.utils import BadIMDSRequestError from botocore.model import DenormalizedStructureBuilder from botocore.model import ShapeResolver from botocore.config import Config @@ -117,7 +113,7 @@ def test_resolve_endpoint_mode_IPv6(self): session = self.create_session_with_config('IPv6', None) self.assertEqual(resolve_imds_endpoint_mode(session), 'ipv6') - def 
test_resolve_endpoint_mode_IPv6(self): + def test_resolve_endpoint_mode_IPv4(self): session = self.create_session_with_config('IPv4', None) self.assertEqual(resolve_imds_endpoint_mode(session), 'ipv4') @@ -192,7 +188,6 @@ def test_json_value_header(self): self.assertTrue(is_json_value_header(shape)) - class TestURINormalization(unittest.TestCase): def test_remove_dot_segments(self): self.assertEqual(remove_dot_segments('../foo'), 'foo') @@ -200,8 +195,7 @@ def test_remove_dot_segments(self): self.assertEqual(remove_dot_segments('./foo'), 'foo') self.assertEqual(remove_dot_segments('/./'), '/') self.assertEqual(remove_dot_segments('/../'), '/') - self.assertEqual(remove_dot_segments('/foo/bar/baz/../qux'), - '/foo/bar/qux') + self.assertEqual(remove_dot_segments('/foo/bar/baz/../qux'), '/foo/bar/qux') self.assertEqual(remove_dot_segments('/foo/..'), '/') self.assertEqual(remove_dot_segments('foo/bar/baz'), 'foo/bar/baz') self.assertEqual(remove_dot_segments('..'), '') @@ -637,7 +631,7 @@ def test_generate_scalar_map(self): 'A': { 'type': 'map', 'key': {'type': 'string'}, - 'value': {'type': 'string'}, + 'value': {'type': 'string'}, } }, generated_skeleton={ @@ -1201,27 +1195,34 @@ def test_percent_encode_special_chars(self): 'k1=with%20spaces%2B%2B%2F') def test_percent_encode_string_string_tuples(self): - self.assertEqual(percent_encode_sequence([('k1', 'v1'), ('k2', 'v2')]), - 'k1=v1&k2=v2') + self.assertEqual( + percent_encode_sequence([('k1', 'v1'), ('k2', 'v2')]), + 'k1=v1&k2=v2' + ) def test_percent_encode_dict_single_pair(self): self.assertEqual(percent_encode_sequence({'k1': 'v1'}), 'k1=v1') def test_percent_encode_dict_string_string(self): self.assertEqual( - percent_encode_sequence(OrderedDict([('k1', 'v1'), ('k2', 'v2')])), - 'k1=v1&k2=v2') + percent_encode_sequence({'k1': 'v1', 'k2': 'v2'}), + 'k1=v1&k2=v2' + ) def test_percent_encode_single_list_of_values(self): - self.assertEqual(percent_encode_sequence({'k1': ['a', 'b', 'c']}), - 'k1=a&k1=b&k1=c') + 
self.assertEqual( + percent_encode_sequence({'k1': ['a', 'b', 'c']}), + 'k1=a&k1=b&k1=c' + ) def test_percent_encode_list_values_of_string(self): self.assertEqual( percent_encode_sequence( - OrderedDict([('k1', ['a', 'list']), - ('k2', ['another', 'list'])])), - 'k1=a&k1=list&k2=another&k2=list') + {'k1': ['a', 'list'], 'k2': ['another', 'list']} + ), + 'k1=a&k1=list&k2=another&k2=list' + ) + class TestPercentEncode(unittest.TestCase): def test_percent_encode_obj(self): @@ -1245,6 +1246,7 @@ def test_percent_encode_bytes(self): # Arbitrary bytes (not valid UTF-8). self.assertEqual(percent_encode(b'\x80\x00'), '%80%00') + class TestSwitchHostS3Accelerate(unittest.TestCase): def setUp(self): self.original_url = 'https://s3.amazonaws.com/foo/key.txt' @@ -2196,12 +2198,11 @@ def test_can_specify_extra_headers_are_merged(self): } self.set_http_responses_to({'foo': 'bar'}) fetcher = self.create_fetcher() - response = fetcher.retrieve_full_uri( - 'http://localhost', headers) + fetcher.retrieve_full_uri('http://localhost', headers) self.assert_request('GET', 'http://localhost', headers) def test_can_retrieve_uri(self): - json_body = { + json_body = { "AccessKeyId" : "a", "SecretAccessKey" : "b", "Token" : "c", @@ -2422,17 +2423,19 @@ def test_disabling_env_var_not_true(self): self.assertEqual(result, self._expected_creds) def test_ec2_metadata_endpoint_service_mode(self): - configs = [({'ec2_metadata_service_endpoint_mode': 'ipv6'}, - 'http://[fd00:ec2::254]/'), - ({'ec2_metadata_service_endpoint_mode': 'ipv6'}, - 'http://[fd00:ec2::254]/'), - ({'ec2_metadata_service_endpoint_mode': 'ipv4'}, - 'http://169.254.169.254/'), - ({'ec2_metadata_service_endpoint_mode': 'foo'}, - 'http://169.254.169.254/'), - ({'ec2_metadata_service_endpoint_mode': 'ipv6', - 'ec2_metadata_service_endpoint': 'http://[fd00:ec2::010]/'}, - 'http://[fd00:ec2::010]/')] + configs = [ + ({'ec2_metadata_service_endpoint_mode': 'ipv6'}, + 'http://[fd00:ec2::254]/'), + 
({'ec2_metadata_service_endpoint_mode': 'ipv6'}, + 'http://[fd00:ec2::254]/'), + ({'ec2_metadata_service_endpoint_mode': 'ipv4'}, + 'http://169.254.169.254/'), + ({'ec2_metadata_service_endpoint_mode': 'foo'}, + 'http://169.254.169.254/'), + ({'ec2_metadata_service_endpoint_mode': 'ipv6', + 'ec2_metadata_service_endpoint': 'http://[fd00:ec2::010]/'}, + 'http://[fd00:ec2::010]/') + ] for config, expected_url in configs: self._test_imds_base_url(config, expected_url) @@ -2445,7 +2448,6 @@ def test_metadata_endpoint(self): def test_ipv6_endpoint_no_brackets_env_var_set(self): url = 'http://fd00:ec2::010/' - config = {'ec2_metadata_service_endpoint': url} self.assertFalse(is_valid_ipv6_endpoint_url(url)) def test_ipv6_invalid_endpoint(self): @@ -2479,10 +2481,12 @@ def test_ipv6_imds_not_allocated(self): self.assertEqual(result, {}) def test_ipv6_imds_empty_config(self): - configs = [({'ec2_metadata_service_endpoint': ''},'http://169.254.169.254/'), - ({'ec2_metadata_service_endpoint_mode': ''}, 'http://169.254.169.254/'), - ({}, 'http://169.254.169.254/'), - (None, 'http://169.254.169.254/')] + configs = [ + ({'ec2_metadata_service_endpoint': ''}, 'http://169.254.169.254/'), + ({'ec2_metadata_service_endpoint_mode': ''}, 'http://169.254.169.254/'), + ({}, 'http://169.254.169.254/'), + (None, 'http://169.254.169.254/') + ] for config, expected_url in configs: self._test_imds_base_url(config, expected_url) @@ -2706,9 +2710,9 @@ def test_can_load_token_exists(self): def test_can_handle_does_not_exist(self): with self.assertRaises(SSOTokenLoadError): - access_token = self.loader(self.start_url) + self.loader(self.start_url) def test_can_handle_invalid_cache(self): self.cache[self.cache_key] = {} with self.assertRaises(SSOTokenLoadError): - access_token = self.loader(self.start_url) + self.loader(self.start_url) diff --git a/tests/unit/test_validate.py b/tests/unit/test_validate.py index b50d3cacc5..ae238aa0d5 100644 --- a/tests/unit/test_validate.py +++ 
b/tests/unit/test_validate.py @@ -32,7 +32,6 @@ def get_validation_error_message(self, given_shapes, input_params): input_shape = s.get_shape_by_name('Input') validator = ParamValidator() errors_found = validator.validate(input_params, input_shape) - error_message = errors_found.generate_report() return errors_found @@ -169,7 +168,6 @@ def test_accepts_document_type_string(self): error_msg = errors.generate_report() self.assertEqual(error_msg, '') - def test_validate_document_type_string(self): self.shapes = { 'Input': { @@ -235,7 +233,6 @@ def test_accepts_one_member(self): error_msg = errors.generate_report() self.assertEqual(error_msg, '') - def test_validate_one_member_is_set(self): self.shapes = { 'Input': { @@ -387,7 +384,7 @@ def test_validate_string(self): def test_datetime_type_accepts_datetime_obj(self): errors = self.get_validation_error_message( given_shapes=self.shapes, - input_params={'Timestamp': datetime.now(),}) + input_params={'Timestamp': datetime.now()}) error_msg = errors.generate_report() self.assertEqual(error_msg, '') diff --git a/tests/unit/test_waiters.py b/tests/unit/test_waiters.py index d98b040c61..e6e6a78fed 100644 --- a/tests/unit/test_waiters.py +++ b/tests/unit/test_waiters.py @@ -420,37 +420,64 @@ def _assert_failure_state_error_raised(self, acceptors, responses, expected_msg) def test_waiter_failure_state_error(self): test_cases = [ - ([{'state': 'failure', 'matcher': 'path', - 'argument': 'Foo', 'expected': 'FAILURE'}], - [{'Foo': 'FAILURE'}], - 'FAILURE'), - ([{'state': 'failure', 'matcher': 'pathAll', - 'argument': 'Tables[].State', 'expected': 'FAILURE'}], - [{'Tables': [{"State": "FAILURE"}]}], - 'FAILURE'), - ([{'state': 'failure', 'matcher': 'pathAny', - 'argument': 'Tables[].State', 'expected': 'FAILURE'}], - [{'Tables': [{"State": "FAILURE"}]}], - 'FAILURE'), - ([{'state': 'failure', 'matcher': 'status', 'expected': 404}], - [{'ResponseMetadata': {'HTTPStatusCode': 404}}], - '404'), - ([{'state': 'failure', 'matcher': 
'error', 'expected': 'FailError'}], - [{'Error': {'Code': 'FailError', 'Message': 'foo'}}], - 'FailError'), - ([{'state': 'retry', 'matcher': 'error', 'expected': 'RetryMe'}], - [{'Success': False}]*4, - 'Max attempts exceeded'), - ([ - {'state': 'success', 'matcher': 'status', 'expected': 200}, - {'state': 'retry', 'matcher': 'error', 'expected': 'RetryMe'}, - ], - [{'Success': False}, - {'Error': {'Code': 'RetryMe', 'Message': 'foo'}}, - {'Success': False}, - {'Success': False}, - ], - 'Previously accepted state'), + ( + [ + { + 'state': 'failure', 'matcher': 'path', + 'argument': 'Foo', 'expected': 'FAILURE' + } + ], + [{'Foo': 'FAILURE'}], + 'FAILURE' + ), + ( + [ + { + 'state': 'failure', 'matcher': 'pathAll', + 'argument': 'Tables[].State', 'expected': 'FAILURE' + } + ], + [{'Tables': [{"State": "FAILURE"}]}], + 'FAILURE' + ), + ( + [ + { + 'state': 'failure', 'matcher': 'pathAny', + 'argument': 'Tables[].State', 'expected': 'FAILURE' + } + ], + [{'Tables': [{"State": "FAILURE"}]}], + 'FAILURE' + ), + ( + [{'state': 'failure', 'matcher': 'status', 'expected': 404}], + [{'ResponseMetadata': {'HTTPStatusCode': 404}}], + '404' + ), + ( + [{'state': 'failure', 'matcher': 'error', 'expected': 'FailError'}], + [{'Error': {'Code': 'FailError', 'Message': 'foo'}}], + 'FailError' + ), + ( + [{'state': 'retry', 'matcher': 'error', 'expected': 'RetryMe'}], + [{'Success': False}]*4, + 'Max attempts exceeded' + ), + ( + [ + {'state': 'success', 'matcher': 'status', 'expected': 200}, + {'state': 'retry', 'matcher': 'error', 'expected': 'RetryMe'}, + ], + [ + {'Success': False}, + {'Error': {'Code': 'RetryMe', 'Message': 'foo'}}, + {'Success': False}, + {'Success': False}, + ], + 'Previously accepted state' + ), ] for acceptors, responses, expected_msg in test_cases: