From 830202d722c904c7e3da40e8dde7b9338d08752c Mon Sep 17 00:00:00 2001
From: AWS SDK for Go v2 automation user
Date: Tue, 28 Nov 2023 18:56:12 +0000
Subject: [PATCH] Merge customizations for service s3

---
 .../51dc2b2349b545baa70246b6e1d54dd9.json | 9 +
 Makefile | 4 +-
 aws/middleware/private/metrics/emf/emf.go | 90 +
 .../private/metrics/emf/emf_test.go | 145 +
 .../private/metrics/emf/emf_test_data.go | 104 +
 aws/middleware/private/metrics/metrics.go | 319 +
 .../private/metrics/metrics_test.go | 218 +
 .../metrics/middleware/configuration.go | 52 +
 .../middleware/endpoint_resolution_end.go | 36 +
 .../endpoint_resolution_end_test.go | 32 +
 .../middleware/endpoint_resolution_start.go | 36 +
 .../endpoint_resolution_start_test.go | 51 +
 .../private/metrics/middleware/http.go | 154 +
 .../private/metrics/middleware/http_test.go | 146 +
 .../metrics/middleware/metric_collection.go | 64 +
 .../middleware/metric_collection_test.go | 89 +
 .../private/metrics/middleware/request.go | 61 +
 .../metrics/middleware/request_attempt.go | 70 +
 .../middleware/request_attempt_test.go | 114 +
 .../metrics/middleware/request_test.go | 86 +
 .../middleware/stack_deserialize_end.go | 45 +
 .../middleware/stack_deserialize_end_test.go | 63 +
 .../middleware/stack_deserialize_start.go | 44 +
 .../stack_deserialize_start_test.go | 59 +
 .../metrics/middleware/stack_serialize_end.go | 33 +
 .../middleware/stack_serialize_end_test.go | 29 +
 .../middleware/stack_serialize_start.go | 32 +
 .../middleware/stack_serialize_start_test.go | 29 +
 .../private/metrics/middleware/transport.go | 46 +
 .../metrics/middleware/transport_test.go | 45 +
 .../metrics/middleware/wrap_data_stream.go | 59 +
 .../middleware/wrap_data_streams_test.go | 114 +
 .../private/metrics/publisher/emf.go | 155 +
 .../private/metrics/publisher/emf_test.go | 255 +
 .../metrics/publisher/emf_test_data.go | 227 +
 .../read_closer_with_metrics.go | 56 +
 .../read_closer_with_metrics_test.go | 83 +
 .../private/metrics/testutils/test_util.go | 107 +
 aws/retry/middleware.go | 8 +
 aws/signer/v4/middleware.go | 29 +
 aws/signer/v4/v4.go | 16 +-
 codegen/sdk-codegen/aws-models/s3.json | 6315 +++++++++++------
 .../aws/go/codegen/AwsGoDependency.java | 1 +
 .../AwsHttpPresignURLClientGenerator.java | 8 +-
 .../ResolveClientConfigFromSources.java | 8 +
 .../smithy/aws/go/codegen/SdkGoTypes.java | 8 +
 .../customization/S3BucketContext.java | 29 +
 .../auth/S3ExpressAuthScheme.java | 161 +
 .../auth/SigV4S3ExpressTrait.java | 41 +
 .../s3/ExpressDefaultChecksum.java | 74 +
 ...mithy.go.codegen.integration.GoIntegration | 3 +
 config/env_config.go | 23 +
 config/load_options.go | 22 +
 config/shared_config.go | 20 +
 feature/s3/manager/upload.go | 45 +
 internal/auth/scheme.go | 17 +-
 internal/auth/scheme_test.go | 26 +
 internal/context/context.go | 39 +
 .../middleware_compute_input_checksum.go | 3 +-
 .../middleware_compute_input_checksum_test.go | 53 +-
 .../checksum/middleware_setup_context.go | 23 +-
 .../checksum/middleware_setup_context_test.go | 3 +-
 service/s3/api_client.go | 15 +-
 service/s3/api_op_AbortMultipartUpload.go | 88 +-
 service/s3/api_op_CompleteMultipartUpload.go | 225 +-
 service/s3/api_op_CopyObject.go | 639 +-
 service/s3/api_op_CreateBucket.go | 162 +-
 service/s3/api_op_CreateMultipartUpload.go | 536 +-
 service/s3/api_op_CreateSession.go | 260 +
 service/s3/api_op_DeleteBucket.go | 50 +-
 ...i_op_DeleteBucketAnalyticsConfiguration.go | 21 +-
 service/s3/api_op_DeleteBucketCors.go | 21 +-
 service/s3/api_op_DeleteBucketEncryption.go | 20 +-
 ...teBucketIntelligentTieringConfiguration.go | 11 +-
 ...i_op_DeleteBucketInventoryConfiguration.go | 24 +-
 service/s3/api_op_DeleteBucketLifecycle.go | 29 +-
 ...api_op_DeleteBucketMetricsConfiguration.go | 27 +-
 .../api_op_DeleteBucketOwnershipControls.go | 19 +-
 service/s3/api_op_DeleteBucketPolicy.go | 77 +-
 service/s3/api_op_DeleteBucketReplication.go | 22 +-
 service/s3/api_op_DeleteBucketTagging.go | 21 +-
 service/s3/api_op_DeleteBucketWebsite.go | 34 +-
 service/s3/api_op_DeleteObject.go | 137 +-
 service/s3/api_op_DeleteObjectTagging.go | 28 +-
 service/s3/api_op_DeleteObjects.go | 183 +-
 service/s3/api_op_DeletePublicAccessBlock.go | 21 +-
 ...api_op_GetBucketAccelerateConfiguration.go | 38 +-
 service/s3/api_op_GetBucketAcl.go | 43 +-
 .../api_op_GetBucketAnalyticsConfiguration.go | 25 +-
 service/s3/api_op_GetBucketCors.go | 47 +-
 service/s3/api_op_GetBucketEncryption.go | 22 +-
 ...etBucketIntelligentTieringConfiguration.go | 9 +-
 .../api_op_GetBucketInventoryConfiguration.go | 21 +-
 .../api_op_GetBucketLifecycleConfiguration.go | 26 +-
 service/s3/api_op_GetBucketLocation.go | 41 +-
 service/s3/api_op_GetBucketLogging.go | 18 +-
 .../api_op_GetBucketMetricsConfiguration.go | 26 +-
 ...i_op_GetBucketNotificationConfiguration.go | 52 +-
 .../s3/api_op_GetBucketOwnershipControls.go | 19 +-
 service/s3/api_op_GetBucketPolicy.go | 103 +-
 service/s3/api_op_GetBucketPolicyStatus.go | 21 +-
 service/s3/api_op_GetBucketReplication.go | 21 +-
 service/s3/api_op_GetBucketRequestPayment.go | 18 +-
 service/s3/api_op_GetBucketTagging.go | 21 +-
 service/s3/api_op_GetBucketVersioning.go | 23 +-
 service/s3/api_op_GetBucketWebsite.go | 19 +-
 service/s3/api_op_GetObject.go | 433 +-
 service/s3/api_op_GetObjectAcl.go | 34 +-
 service/s3/api_op_GetObjectAttributes.go | 168 +-
 service/s3/api_op_GetObjectLegalHold.go | 30 +-
 .../s3/api_op_GetObjectLockConfiguration.go | 22 +-
 service/s3/api_op_GetObjectRetention.go | 30 +-
 service/s3/api_op_GetObjectTagging.go | 45 +-
 service/s3/api_op_GetObjectTorrent.go | 31 +-
 service/s3/api_op_GetPublicAccessBlock.go | 20 +-
 service/s3/api_op_HeadBucket.go | 128 +-
 service/s3/api_op_HeadObject.go | 305 +-
 ...pi_op_ListBucketAnalyticsConfigurations.go | 31 +-
 ...tBucketIntelligentTieringConfigurations.go | 11 +-
 ...pi_op_ListBucketInventoryConfigurations.go | 23 +-
 .../api_op_ListBucketMetricsConfigurations.go | 22 +-
 service/s3/api_op_ListBuckets.go | 11 +-
 service/s3/api_op_ListDirectoryBuckets.go | 291 +
 service/s3/api_op_ListMultipartUploads.go | 172 +-
 service/s3/api_op_ListObjectVersions.go | 35 +-
 service/s3/api_op_ListObjects.go | 53 +-
 service/s3/api_op_ListObjectsV2.go | 172 +-
 service/s3/api_op_ListParts.go | 126 +-
 ...api_op_PutBucketAccelerateConfiguration.go | 39 +-
 service/s3/api_op_PutBucketAcl.go | 36 +-
 .../api_op_PutBucketAnalyticsConfiguration.go | 34 +-
 service/s3/api_op_PutBucketCors.go | 52 +-
 service/s3/api_op_PutBucketEncryption.go | 44 +-
 ...utBucketIntelligentTieringConfiguration.go | 11 +-
 .../api_op_PutBucketInventoryConfiguration.go | 38 +-
 .../api_op_PutBucketLifecycleConfiguration.go | 38 +-
 service/s3/api_op_PutBucketLogging.go | 49 +-
 .../api_op_PutBucketMetricsConfiguration.go | 31 +-
 ...i_op_PutBucketNotificationConfiguration.go | 17 +-
 .../s3/api_op_PutBucketOwnershipControls.go | 22 +-
 service/s3/api_op_PutBucketPolicy.go | 109 +-
 service/s3/api_op_PutBucketReplication.go | 31 +-
 service/s3/api_op_PutBucketRequestPayment.go | 36 +-
 service/s3/api_op_PutBucketTagging.go | 44 +-
 service/s3/api_op_PutBucketVersioning.go | 40 +-
 service/s3/api_op_PutBucketWebsite.go | 37 +-
 service/s3/api_op_PutObject.go | 403 +-
 service/s3/api_op_PutObjectAcl.go | 95 +-
 service/s3/api_op_PutObjectLegalHold.go | 43 +-
 .../s3/api_op_PutObjectLockConfiguration.go | 40 +-
 service/s3/api_op_PutObjectRetention.go | 45 +-
 service/s3/api_op_PutObjectTagging.go | 48 +-
 service/s3/api_op_PutPublicAccessBlock.go | 35 +-
 service/s3/api_op_RestoreObject.go | 48 +-
 service/s3/api_op_SelectObjectContent.go | 33 +-
 service/s3/api_op_UploadPart.go | 238 +-
 service/s3/api_op_UploadPartCopy.go | 288 +-
 service/s3/api_op_WriteGetObjectResponse.go | 12 +-
 service/s3/bucket_context.go | 47 +
 service/s3/create_mpu_checksum.go | 36 +
 service/s3/deserializers.go | 425 +-
 service/s3/endpoint_auth_resolver.go | 7 +
 service/s3/endpoints.go | 1057 ++-
 service/s3/endpoints_test.go | 1209 ++++
 service/s3/express.go | 9 +
 service/s3/express_default.go | 123 +
 service/s3/express_resolve.go | 29 +
 service/s3/express_test.go | 174 +
 service/s3/generated.json | 2 +
 service/s3/internal/customizations/context.go | 21 +
 service/s3/internal/customizations/express.go | 44 +
 .../internal/customizations/express_config.go | 18 +
 .../express_default_checksum.go | 42 +
 .../customizations/express_properties.go | 21 +
 .../internal/customizations/express_signer.go | 109 +
 .../customizations/express_signer_smithy.go | 61 +
 .../internal/customizations/signer_wrapper.go | 41 +-
 .../customizations/update_endpoint_test.go | 2 +-
 service/s3/options.go | 18 +
 service/s3/serializers.go | 207 +-
 service/s3/types/enums.go | 78 +-
 service/s3/types/errors.go | 9 +-
 service/s3/types/types.go | 328 +-
 service/s3/validators.go | 39 +
 184 files changed, 17179 insertions(+), 4825 deletions(-)
 create mode 100644 .changelog/51dc2b2349b545baa70246b6e1d54dd9.json
 create mode 100644 aws/middleware/private/metrics/emf/emf.go
 create mode 100644 aws/middleware/private/metrics/emf/emf_test.go
 create mode 100644 aws/middleware/private/metrics/emf/emf_test_data.go
 create mode 100644 aws/middleware/private/metrics/metrics.go
 create mode 100644 aws/middleware/private/metrics/metrics_test.go
 create mode 100644 aws/middleware/private/metrics/middleware/configuration.go
 create mode 100644 aws/middleware/private/metrics/middleware/endpoint_resolution_end.go
 create mode 100644 aws/middleware/private/metrics/middleware/endpoint_resolution_end_test.go
 create mode 100644 aws/middleware/private/metrics/middleware/endpoint_resolution_start.go
 create mode 100644 aws/middleware/private/metrics/middleware/endpoint_resolution_start_test.go
 create mode 100644 aws/middleware/private/metrics/middleware/http.go
 create mode 100644 aws/middleware/private/metrics/middleware/http_test.go
 create mode 100644 aws/middleware/private/metrics/middleware/metric_collection.go
 create mode 100644 aws/middleware/private/metrics/middleware/metric_collection_test.go
 create mode 100644 aws/middleware/private/metrics/middleware/request.go
 create mode 100644 aws/middleware/private/metrics/middleware/request_attempt.go
 create mode 100644 aws/middleware/private/metrics/middleware/request_attempt_test.go
 create mode 100644 aws/middleware/private/metrics/middleware/request_test.go
 create mode 100644 aws/middleware/private/metrics/middleware/stack_deserialize_end.go
 create mode 100644 aws/middleware/private/metrics/middleware/stack_deserialize_end_test.go
 create mode 100644 aws/middleware/private/metrics/middleware/stack_deserialize_start.go
 create mode 100644 aws/middleware/private/metrics/middleware/stack_deserialize_start_test.go
 create mode 100644 aws/middleware/private/metrics/middleware/stack_serialize_end.go
 create mode 100644 aws/middleware/private/metrics/middleware/stack_serialize_end_test.go
 create mode 100644 aws/middleware/private/metrics/middleware/stack_serialize_start.go
 create mode 100644 aws/middleware/private/metrics/middleware/stack_serialize_start_test.go
 create mode 100644 aws/middleware/private/metrics/middleware/transport.go
 create mode 100644 aws/middleware/private/metrics/middleware/transport_test.go
 create mode 100644 aws/middleware/private/metrics/middleware/wrap_data_stream.go
 create mode 100644 aws/middleware/private/metrics/middleware/wrap_data_streams_test.go
 create mode 100644 aws/middleware/private/metrics/publisher/emf.go
 create mode 100644 aws/middleware/private/metrics/publisher/emf_test.go
 create mode 100644 aws/middleware/private/metrics/publisher/emf_test_data.go
 create mode 100644 aws/middleware/private/metrics/readcloserwithmetrics/read_closer_with_metrics.go
 create mode 100644 aws/middleware/private/metrics/readcloserwithmetrics/read_closer_with_metrics_test.go
 create mode 100644 aws/middleware/private/metrics/testutils/test_util.go
 create mode 100644 codegen/smithy-aws-go-codegen/src/main/java/software/amazon/smithy/aws/go/codegen/customization/S3BucketContext.java
 create mode 100644 codegen/smithy-aws-go-codegen/src/main/java/software/amazon/smithy/aws/go/codegen/customization/auth/S3ExpressAuthScheme.java
 create mode 100644 codegen/smithy-aws-go-codegen/src/main/java/software/amazon/smithy/aws/go/codegen/customization/auth/SigV4S3ExpressTrait.java
 create mode 100644 codegen/smithy-aws-go-codegen/src/main/java/software/amazon/smithy/aws/go/codegen/customization/s3/ExpressDefaultChecksum.java
 create mode 100644 internal/context/context.go
 create mode 100644 service/s3/api_op_CreateSession.go
 create mode 100644 service/s3/api_op_ListDirectoryBuckets.go
 create mode 100644 service/s3/bucket_context.go
 create mode 100644 service/s3/create_mpu_checksum.go
 create mode 100644 service/s3/express.go
 create mode 100644 service/s3/express_default.go
 create mode 100644 service/s3/express_resolve.go
 create mode 100644 service/s3/express_test.go
 create mode 100644 service/s3/internal/customizations/context.go
 create mode 100644 service/s3/internal/customizations/express.go
 create mode 100644 service/s3/internal/customizations/express_config.go
 create mode 100644 service/s3/internal/customizations/express_default_checksum.go
 create mode 100644 service/s3/internal/customizations/express_properties.go
 create mode 100644 service/s3/internal/customizations/express_signer.go
 create mode 100644 service/s3/internal/customizations/express_signer_smithy.go

diff --git a/.changelog/51dc2b2349b545baa70246b6e1d54dd9.json b/.changelog/51dc2b2349b545baa70246b6e1d54dd9.json
new file mode 100644
index 00000000000..560eca3f957
--- /dev/null
+++ b/.changelog/51dc2b2349b545baa70246b6e1d54dd9.json
@@ -0,0 +1,9 @@
+{
+  "id": "51dc2b23-49b5-45ba-a702-46b6e1d54dd9",
+  "type": "feature",
+  "description": "Add S3Express support.",
+  "modules": [
+    "feature/s3/manager",
+    "service/s3"
+  ]
+}
diff --git a/Makefile b/Makefile
index ed45b15d337..f41ab33c0e5 100644
--- a/Makefile
+++ b/Makefile
@@ -4,6 +4,7 @@ LINT_IGNORE_S3MANAGER_INPUT='feature/s3/manager/upload.go:.+struct field SSEKMSK
 # Names of these are tied to endpoint rules and they're internal so ignore them
 LINT_IGNORE_AWSRULESFN_ARN='internal/endpoints/awsrulesfn/arn.go'
 LINT_IGNORE_AWSRULESFN_PARTITION='internal/endpoints/awsrulesfn/partition.go'
+LINT_IGNORE_PRIVATE_METRICS='aws/middleware/private/metrics'
 
 UNIT_TEST_TAGS=
 BUILD_TAGS=-tags "example,codegen,integration,ec2env,perftest"
@@ -472,7 +473,8 @@ lint:
 	-e ${LINT_IGNORE_S3MANAGER_INPUT} \
 	-e ${LINTIGNORESINGLEFIGHT} \
 	-e ${LINT_IGNORE_AWSRULESFN_ARN} \
-	-e ${LINT_IGNORE_AWSRULESFN_PARTITION}`; \
+	-e ${LINT_IGNORE_AWSRULESFN_PARTITION} \
+	-e ${LINT_IGNORE_PRIVATE_METRICS}`; \
 	echo "$$dolint"; \
 	if [ "$$dolint" != "" ]; then exit 1; fi
 
diff --git a/aws/middleware/private/metrics/emf/emf.go b/aws/middleware/private/metrics/emf/emf.go
new file mode 100644
index 00000000000..c9516f3753a
--- /dev/null
+++ b/aws/middleware/private/metrics/emf/emf.go
@@ -0,0 +1,90 @@
+// Package emf implements an EMF metrics publisher.
+//
+// This package is designated as private and is intended for use only by the
+// smithy client runtime. The exported API therein is not considered stable and
+// is subject to breaking changes without notice.
+package emf
+
+import (
+	"github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics"
+	"github.com/aws/aws-sdk-go-v2/internal/sdk"
+)
+
+const (
+	emfIdentifier        = "_aws"
+	timestampKey         = "Timestamp"
+	cloudWatchMetricsKey = "CloudWatchMetrics"
+	namespaceKey         = "Namespace"
+	dimensionsKey        = "Dimensions"
+	metricsKey           = "Metrics"
+)
+
+// Entry represents a log entry in the EMF format.
+type Entry struct {
+	namespace  string
+	serializer metrics.Serializer
+	metrics    []metric
+	dimensions [][]string
+	fields     map[string]interface{}
+}
+
+type metric struct {
+	Name string
+}
+
+// NewEntry creates a new Entry with the specified namespace and serializer.
+func NewEntry(namespace string, serializer metrics.Serializer) Entry {
+	return Entry{
+		namespace:  namespace,
+		serializer: serializer,
+		metrics:    []metric{},
+		dimensions: [][]string{{}},
+		fields:     map[string]interface{}{},
+	}
+}
+
+// Build constructs the EMF log entry as a JSON string.
+func (e *Entry) Build() (string, error) {
+
+	entry := map[string]interface{}{}
+
+	entry[emfIdentifier] = map[string]interface{}{
+		timestampKey: sdk.NowTime().UnixNano() / 1e6,
+		cloudWatchMetricsKey: []map[string]interface{}{
+			{
+				namespaceKey:  e.namespace,
+				dimensionsKey: e.dimensions,
+				metricsKey:    e.metrics,
+			},
+		},
+	}
+
+	for k, v := range e.fields {
+		entry[k] = v
+	}
+
+	jsonEntry, err := e.serializer.Serialize(entry)
+	if err != nil {
+		return "", err
+	}
+	return jsonEntry, nil
+}
+
+// AddDimension adds a CW Dimension to the EMF entry.
+func (e *Entry) AddDimension(key string, value string) {
+	// Dimensions are a list of lists. We only support a single list.
+	e.dimensions[0] = append(e.dimensions[0], key)
+	e.fields[key] = value
+}
+
+// AddMetric adds a CW Metric to the EMF entry.
+func (e *Entry) AddMetric(key string, value float64) {
+	e.metrics = append(e.metrics, metric{key})
+	e.fields[key] = value
+}
+
+// AddProperty adds a CW Property to the EMF entry.
+// Properties are not published as metrics, but they are available in logs and in CW insights.
+func (e *Entry) AddProperty(key string, value interface{}) {
+	e.fields[key] = value
+}
diff --git a/aws/middleware/private/metrics/emf/emf_test.go b/aws/middleware/private/metrics/emf/emf_test.go
new file mode 100644
index 00000000000..00deaada5d8
--- /dev/null
+++ b/aws/middleware/private/metrics/emf/emf_test.go
@@ -0,0 +1,145 @@
+// This package is designated as private and is intended for use only by the
+// smithy client runtime. The exported API therein is not considered stable and
+// is subject to breaking changes without notice.
+ +package emf + +import ( + "fmt" + "reflect" + "testing" + "time" + + "github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics" + "github.com/aws/aws-sdk-go-v2/internal/sdk" +) + +type TestSerializerWithError struct{} + +func (TestSerializerWithError) Serialize(obj interface{}) (string, error) { + return "", fmt.Errorf("serialization error") +} + +func TestCreateNewEntry(t *testing.T) { + + sdk.NowTime = func() time.Time { + return time.Unix(1234, 0) + } + + cases := map[string]struct { + Namespace string + ExpectedEntry Entry + }{ + "success": { + Namespace: "testNamespace", + ExpectedEntry: Entry{ + namespace: "testNamespace", + serializer: metrics.DefaultSerializer{}, + metrics: []metric{}, + dimensions: [][]string{{}}, + fields: map[string]interface{}{}, + }, + }, + } + + for name, c := range cases { + t.Run(name, func(t *testing.T) { + actualEntry := NewEntry(c.Namespace, metrics.DefaultSerializer{}) + if !reflect.DeepEqual(actualEntry, c.ExpectedEntry) { + t.Errorf("Entry contained unexpected values") + } + }) + } +} + +func TestBuild(t *testing.T) { + + sdk.NowTime = func() time.Time { + return time.Unix(1234, 0) + } + + cases := map[string]struct { + Namespace string + Configure func(entry *Entry) + Serializer metrics.Serializer + ExpectedError error + ExpectedResult string + }{ + "completeEntry": { + Namespace: "testNamespace", + Serializer: metrics.DefaultSerializer{}, + Configure: func(entry *Entry) { + entry.AddMetric("testMetric1", 1) + entry.AddMetric("testMetric2", 2) + entry.AddDimension("testDimension1", "dim1") + entry.AddDimension("testDimension2", "dim2") + entry.AddProperty("testProperty1", "prop1") + entry.AddProperty("testProperty2", "prop2") + }, + ExpectedError: nil, + ExpectedResult: completeEntry, + }, + "noMetrics": { + Namespace: "testNamespace", + Serializer: metrics.DefaultSerializer{}, + Configure: func(entry *Entry) { + entry.AddDimension("testDimension1", "dim1") + entry.AddDimension("testDimension2", "dim2") + entry.AddProperty("testProperty1", "prop1") + entry.AddProperty("testProperty2", "prop2") + }, + ExpectedError: nil, + ExpectedResult: noMetrics, + }, + "noDimensions": { + Namespace: "testNamespace", + Serializer: metrics.DefaultSerializer{}, + Configure: func(entry *Entry) { + entry.AddMetric("testMetric1", 1) + entry.AddMetric("testMetric2", 2) + entry.AddProperty("testProperty1", "prop1") + entry.AddProperty("testProperty2", "prop2") + }, + ExpectedError: nil, + ExpectedResult: noDimensions, + }, + "noProperties": { + Namespace: "testNamespace", + Serializer: metrics.DefaultSerializer{}, + Configure: func(entry *Entry) { + entry.AddMetric("testMetric1", 1) + entry.AddMetric("testMetric2", 2) + entry.AddDimension("testDimension1", "dim1") + entry.AddDimension("testDimension2", "dim2") + }, + ExpectedError: nil, + ExpectedResult: noProperties, + }, + "serializationError": { + Namespace: "testNamespace", + Serializer: TestSerializerWithError{}, + Configure: func(entry *Entry) { + }, + ExpectedError: fmt.Errorf("serialization error"), + ExpectedResult: "", + }, + } + + for name, c := range cases { + t.Run(name, func(t *testing.T) { + entry := NewEntry(c.Namespace, c.Serializer) + + c.Configure(&entry) + + result, err := entry.Build() + + if !reflect.DeepEqual(err, c.ExpectedError) { + t.Errorf("Unexpected error, should be '%s' but was '%s'", c.ExpectedError, err) + } + + if !reflect.DeepEqual(result, c.ExpectedResult) { + t.Errorf("Unexpected result, should be '%s' but was '%s'", c.ExpectedResult, result) + } + }) + } +} diff --git 
a/aws/middleware/private/metrics/emf/emf_test_data.go b/aws/middleware/private/metrics/emf/emf_test_data.go new file mode 100644 index 00000000000..47102e014fd --- /dev/null +++ b/aws/middleware/private/metrics/emf/emf_test_data.go @@ -0,0 +1,104 @@ +// This package is designated as private and is intended for use only by the +// smithy client runtime. The exported API therein is not considered stable and +// is subject to breaking changes without notice. + +package emf + +import "strings" + +func stripString(str string) string { + str = strings.Replace(str, " ", "", -1) + str = strings.Replace(str, "\t", "", -1) + str = strings.Replace(str, "\n", "", -1) + return str +} + +var completeEntry = stripString(` +{ + "_aws": { + "CloudWatchMetrics": [{ + "Dimensions": [ + ["testDimension1", "testDimension2"] + ], + "Metrics": [{ + "Name": "testMetric1" + }, { + "Name": "testMetric2" + }], + "Namespace": "testNamespace" + }], + "Timestamp": 1234000 + }, + "testDimension1": "dim1", + "testDimension2": "dim2", + "testMetric1": 1, + "testMetric2": 2, + "testProperty1": "prop1", + "testProperty2": "prop2" +} +`) + +var noMetrics = stripString(` +{ + "_aws": { + "CloudWatchMetrics": [{ + "Dimensions": [ + ["testDimension1", "testDimension2"] + ], + "Metrics": [], + "Namespace": "testNamespace" + }], + "Timestamp": 1234000 + }, + "testDimension1": "dim1", + "testDimension2": "dim2", + "testProperty1": "prop1", + "testProperty2": "prop2" +} +`) + +var noProperties = stripString(` +{ + "_aws": { + "CloudWatchMetrics": [{ + "Dimensions": [ + ["testDimension1", "testDimension2"] + ], + "Metrics": [{ + "Name": "testMetric1" + }, { + "Name": "testMetric2" + }], + "Namespace": "testNamespace" + }], + "Timestamp": 1234000 + }, + "testDimension1": "dim1", + "testDimension2": "dim2", + "testMetric1": 1, + "testMetric2": 2 +} +`) + +var noDimensions = stripString(` +{ + "_aws": { + "CloudWatchMetrics": [{ + "Dimensions": [ + [] + ], + "Metrics": [{ + "Name": "testMetric1" + }, { + "Name": "testMetric2" + }], + "Namespace": "testNamespace" + }], + "Timestamp": 1234000 + }, + "testMetric1": 1, + "testMetric2": 2, + "testProperty1": "prop1", + "testProperty2": "prop2" +} +`) diff --git a/aws/middleware/private/metrics/metrics.go b/aws/middleware/private/metrics/metrics.go new file mode 100644 index 00000000000..b0133f4c88d --- /dev/null +++ b/aws/middleware/private/metrics/metrics.go @@ -0,0 +1,319 @@ +// Package metrics implements metrics gathering for SDK development purposes. +// +// This package is designated as private and is intended for use only by the +// AWS client runtime. The exported API therein is not considered stable and +// is subject to breaking changes without notice. +package metrics + +import ( + "context" + "encoding/json" + "fmt" + "sync" + "time" + + "github.com/aws/smithy-go/middleware" +) + +const ( + // ServiceIDKey is the key for the service ID metric. + ServiceIDKey = "ServiceId" + // OperationNameKey is the key for the operation name metric. + OperationNameKey = "OperationName" + // ClientRequestIDKey is the key for the client request ID metric. + ClientRequestIDKey = "ClientRequestId" + // APICallDurationKey is the key for the API call duration metric. + APICallDurationKey = "ApiCallDuration" + // APICallSuccessfulKey is the key for the API call successful metric. + APICallSuccessfulKey = "ApiCallSuccessful" + // MarshallingDurationKey is the key for the marshalling duration metric. 
+ MarshallingDurationKey = "MarshallingDuration" + // InThroughputKey is the key for the input throughput metric. + InThroughputKey = "InThroughput" + // OutThroughputKey is the key for the output throughput metric. + OutThroughputKey = "OutThroughput" + // RetryCountKey is the key for the retry count metric. + RetryCountKey = "RetryCount" + // HTTPStatusCodeKey is the key for the HTTP status code metric. + HTTPStatusCodeKey = "HttpStatusCode" + // AWSExtendedRequestIDKey is the key for the AWS extended request ID metric. + AWSExtendedRequestIDKey = "AwsExtendedRequestId" + // AWSRequestIDKey is the key for the AWS request ID metric. + AWSRequestIDKey = "AwsRequestId" + // BackoffDelayDurationKey is the key for the backoff delay duration metric. + BackoffDelayDurationKey = "BackoffDelayDuration" + // StreamThroughputKey is the key for the stream throughput metric. + StreamThroughputKey = "Throughput" + // ConcurrencyAcquireDurationKey is the key for the concurrency acquire duration metric. + ConcurrencyAcquireDurationKey = "ConcurrencyAcquireDuration" + // PendingConcurrencyAcquiresKey is the key for the pending concurrency acquires metric. + PendingConcurrencyAcquiresKey = "PendingConcurrencyAcquires" + // SigningDurationKey is the key for the signing duration metric. + SigningDurationKey = "SigningDuration" + // UnmarshallingDurationKey is the key for the unmarshalling duration metric. + UnmarshallingDurationKey = "UnmarshallingDuration" + // TimeToFirstByteKey is the key for the time to first byte metric. + TimeToFirstByteKey = "TimeToFirstByte" + // ServiceCallDurationKey is the key for the service call duration metric. + ServiceCallDurationKey = "ServiceCallDuration" + // EndpointResolutionDurationKey is the key for the endpoint resolution duration metric. + EndpointResolutionDurationKey = "EndpointResolutionDuration" + // AttemptNumberKey is the key for the attempt number metric. + AttemptNumberKey = "AttemptNumber" + // MaxConcurrencyKey is the key for the max concurrency metric. + MaxConcurrencyKey = "MaxConcurrency" + // AvailableConcurrencyKey is the key for the available concurrency metric. + AvailableConcurrencyKey = "AvailableConcurrency" +) + +// MetricPublisher provides the interface to provide custom MetricPublishers. +// PostRequestMetrics will be invoked by the MetricCollection middleware to post request. +// PostStreamMetrics will be invoked by ReadCloserWithMetrics to post stream metrics. +type MetricPublisher interface { + PostRequestMetrics(*MetricData) error + PostStreamMetrics(*MetricData) error +} + +// Serializer provides the interface to provide custom Serializers. +// Serialize will transform any input object in its corresponding string representation. +type Serializer interface { + Serialize(obj interface{}) (string, error) +} + +// DefaultSerializer is an implementation of the Serializer interface. +type DefaultSerializer struct{} + +// Serialize uses the default JSON serializer to obtain the string representation of an object. +func (DefaultSerializer) Serialize(obj interface{}) (string, error) { + bytes, err := json.Marshal(obj) + if err != nil { + return "", err + } + return string(bytes), nil +} + +type metricContextKey struct{} + +// MetricContext contains fields to store metric-related information. +type MetricContext struct { + connectionCounter *SharedConnectionCounter + publisher MetricPublisher + data *MetricData +} + +// MetricData stores the collected metric data. 
+type MetricData struct { + RequestStartTime time.Time + RequestEndTime time.Time + APICallDuration time.Duration + SerializeStartTime time.Time + SerializeEndTime time.Time + MarshallingDuration time.Duration + ResolveEndpointStartTime time.Time + ResolveEndpointEndTime time.Time + EndpointResolutionDuration time.Duration + InThroughput float64 + OutThroughput float64 + RetryCount int + Success uint8 + StatusCode int + ClientRequestID string + ServiceID string + OperationName string + PartitionID string + Region string + RequestContentLength int64 + Stream StreamMetrics + Attempts []AttemptMetrics +} + +// StreamMetrics stores metrics related to streaming data. +type StreamMetrics struct { + ReadDuration time.Duration + ReadBytes int64 + Throughput float64 +} + +// AttemptMetrics stores metrics related to individual attempts. +type AttemptMetrics struct { + ServiceCallStart time.Time + ServiceCallEnd time.Time + ServiceCallDuration time.Duration + FirstByteTime time.Time + TimeToFirstByte time.Duration + ConnRequestedTime time.Time + ConnObtainedTime time.Time + ConcurrencyAcquireDuration time.Duration + CredentialFetchStartTime time.Time + CredentialFetchEndTime time.Time + SignStartTime time.Time + SignEndTime time.Time + SigningDuration time.Duration + DeserializeStartTime time.Time + DeserializeEndTime time.Time + UnMarshallingDuration time.Duration + RetryDelay time.Duration + ResponseContentLength int64 + StatusCode int + RequestID string + ExtendedRequestID string + HTTPClient string + MaxConcurrency int + PendingConnectionAcquires int + AvailableConcurrency int + ActiveRequests int + ReusedConnection bool +} + +// Data returns the MetricData associated with the MetricContext. +func (mc *MetricContext) Data() *MetricData { + return mc.data +} + +// ConnectionCounter returns the SharedConnectionCounter associated with the MetricContext. +func (mc *MetricContext) ConnectionCounter() *SharedConnectionCounter { + return mc.connectionCounter +} + +// Publisher returns the MetricPublisher associated with the MetricContext. +func (mc *MetricContext) Publisher() MetricPublisher { + return mc.publisher +} + +// ComputeRequestMetrics calculates and populates derived metrics based on the collected data. +func (md *MetricData) ComputeRequestMetrics() { + + for idx := range md.Attempts { + attempt := &md.Attempts[idx] + attempt.ConcurrencyAcquireDuration = attempt.ConnObtainedTime.Sub(attempt.ConnRequestedTime) + attempt.SigningDuration = attempt.SignEndTime.Sub(attempt.SignStartTime) + attempt.UnMarshallingDuration = attempt.DeserializeEndTime.Sub(attempt.DeserializeStartTime) + attempt.TimeToFirstByte = attempt.FirstByteTime.Sub(attempt.ServiceCallStart) + attempt.ServiceCallDuration = attempt.ServiceCallEnd.Sub(attempt.ServiceCallStart) + } + + md.APICallDuration = md.RequestEndTime.Sub(md.RequestStartTime) + md.MarshallingDuration = md.SerializeEndTime.Sub(md.SerializeStartTime) + md.EndpointResolutionDuration = md.ResolveEndpointEndTime.Sub(md.ResolveEndpointStartTime) + + md.RetryCount = len(md.Attempts) - 1 + + latestAttempt, err := md.LatestAttempt() + + if err != nil { + fmt.Printf("error retrieving attempts data due to: %s. 
Skipping Throughput metrics", err.Error()) + } else { + + md.StatusCode = latestAttempt.StatusCode + + if md.Success == 1 { + if latestAttempt.ResponseContentLength > 0 && latestAttempt.ServiceCallDuration > 0 { + md.InThroughput = float64(latestAttempt.ResponseContentLength) / latestAttempt.ServiceCallDuration.Seconds() + } + if md.RequestContentLength > 0 && latestAttempt.ServiceCallDuration > 0 { + md.OutThroughput = float64(md.RequestContentLength) / latestAttempt.ServiceCallDuration.Seconds() + } + } + } +} + +// LatestAttempt returns the latest attempt metrics. +// It returns an error if no attempts are initialized. +func (md *MetricData) LatestAttempt() (*AttemptMetrics, error) { + if md.Attempts == nil || len(md.Attempts) == 0 { + return nil, fmt.Errorf("no attempts initialized. NewAttempt() should be called first") + } + return &md.Attempts[len(md.Attempts)-1], nil +} + +// NewAttempt initializes new attempt metrics. +func (md *MetricData) NewAttempt() { + if md.Attempts == nil { + md.Attempts = []AttemptMetrics{} + } + md.Attempts = append(md.Attempts, AttemptMetrics{}) +} + +// SharedConnectionCounter is a counter shared across API calls. +type SharedConnectionCounter struct { + mu sync.Mutex + + activeRequests int + pendingConnectionAcquire int +} + +// ActiveRequests returns the count of active requests. +func (cc *SharedConnectionCounter) ActiveRequests() int { + cc.mu.Lock() + defer cc.mu.Unlock() + + return cc.activeRequests +} + +// PendingConnectionAcquire returns the count of pending connection acquires. +func (cc *SharedConnectionCounter) PendingConnectionAcquire() int { + cc.mu.Lock() + defer cc.mu.Unlock() + + return cc.pendingConnectionAcquire +} + +// AddActiveRequest increments the count of active requests. +func (cc *SharedConnectionCounter) AddActiveRequest() { + cc.mu.Lock() + defer cc.mu.Unlock() + + cc.activeRequests++ +} + +// RemoveActiveRequest decrements the count of active requests. +func (cc *SharedConnectionCounter) RemoveActiveRequest() { + cc.mu.Lock() + defer cc.mu.Unlock() + + cc.activeRequests-- +} + +// AddPendingConnectionAcquire increments the count of pending connection acquires. +func (cc *SharedConnectionCounter) AddPendingConnectionAcquire() { + cc.mu.Lock() + defer cc.mu.Unlock() + + cc.pendingConnectionAcquire++ +} + +// RemovePendingConnectionAcquire decrements the count of pending connection acquires. +func (cc *SharedConnectionCounter) RemovePendingConnectionAcquire() { + cc.mu.Lock() + defer cc.mu.Unlock() + + cc.pendingConnectionAcquire-- +} + +// InitMetricContext initializes the metric context with the provided counter and publisher. +// It returns the updated context. +func InitMetricContext( + ctx context.Context, counter *SharedConnectionCounter, publisher MetricPublisher, +) context.Context { + if middleware.GetStackValue(ctx, metricContextKey{}) == nil { + ctx = middleware.WithStackValue(ctx, metricContextKey{}, &MetricContext{ + connectionCounter: counter, + publisher: publisher, + data: &MetricData{ + Attempts: []AttemptMetrics{}, + Stream: StreamMetrics{}, + }, + }) + } + return ctx +} + +// Context returns the metric context from the given context. +// It returns nil if the metric context is not found. 
+func Context(ctx context.Context) *MetricContext { + mctx := middleware.GetStackValue(ctx, metricContextKey{}) + if mctx == nil { + return nil + } + return mctx.(*MetricContext) +} diff --git a/aws/middleware/private/metrics/metrics_test.go b/aws/middleware/private/metrics/metrics_test.go new file mode 100644 index 00000000000..c8ab1418e58 --- /dev/null +++ b/aws/middleware/private/metrics/metrics_test.go @@ -0,0 +1,218 @@ +// This package is designated as private and is intended for use only by the +// AWS client runtime. The exported API therein is not considered stable and +// is subject to breaking changes without notice. + +package metrics + +import ( + "context" + "testing" + "time" +) + +type TestPublisher struct{} + +func (tp *TestPublisher) PostRequestMetrics(data *MetricData) error { + return nil +} + +func (tp *TestPublisher) PostStreamMetrics(data *MetricData) error { + return nil +} + +func TestInitAndRetrieveMetricContext(t *testing.T) { + ctx := context.Background() + cc := SharedConnectionCounter{} + tp := TestPublisher{} + + ctx = InitMetricContext(ctx, &cc, &tp) + mctx := Context(ctx) + + if mctx == nil { + t.Errorf("Metric context should not be nil") + } + if mctx.publisher != &tp || mctx.Publisher() != &tp { + t.Errorf("Unexpected publisher") + } + if mctx.connectionCounter != &cc || mctx.ConnectionCounter() != &cc { + t.Errorf("Unexpected connection counter") + } +} + +func TestConnectionCounter(t *testing.T) { + cc := SharedConnectionCounter{} + cc.AddPendingConnectionAcquire() + cc.AddPendingConnectionAcquire() + cc.RemovePendingConnectionAcquire() + cc.AddActiveRequest() + cc.AddActiveRequest() + cc.AddActiveRequest() + cc.RemoveActiveRequest() + + if cc.PendingConnectionAcquire() != 1 { + t.Errorf("Unexpected count for PendingConnectionAcquire") + } + + if cc.ActiveRequests() != 2 { + t.Errorf("Unexpected count for ActiveRequests") + } +} + +func TestAttemptCreationAndRetrieval(t *testing.T) { + ctx := context.TODO() + cc := SharedConnectionCounter{} + tp := TestPublisher{} + + ctx = InitMetricContext(ctx, &cc, &tp) + + mctx := Context(ctx) + + _, err := mctx.Data().LatestAttempt() + + if err == nil { + t.Errorf("Expected error for uninitialized attempt") + } + + mctx.Data().NewAttempt() + + if len(mctx.Data().Attempts) != 1 { + t.Errorf("Unexpected number of attempts") + } + + _, err = mctx.Data().LatestAttempt() + + if err != nil { + t.Errorf("Unexpected error for uninitialized attempt") + } +} + +func TestMetricData_ComputeRequestMetrics(t *testing.T) { + + data := MetricData{ + RequestStartTime: time.Unix(1234, 0), + RequestEndTime: time.Unix(1434, 0), + SerializeStartTime: time.Unix(1234, 0), + SerializeEndTime: time.Unix(1434, 0), + ResolveEndpointStartTime: time.Unix(1234, 0), + ResolveEndpointEndTime: time.Unix(1434, 0), + Success: 1, + ClientRequestID: "crid", + ServiceID: "sid", + OperationName: "operationname", + PartitionID: "partitionid", + Region: "region", + RequestContentLength: 100, + Stream: StreamMetrics{}, + Attempts: []AttemptMetrics{{ + ServiceCallStart: time.Unix(1234, 0), + ServiceCallEnd: time.Unix(1434, 0), + FirstByteTime: time.Unix(1334, 0), + ConnRequestedTime: time.Unix(1234, 0), + ConnObtainedTime: time.Unix(1434, 0), + CredentialFetchStartTime: time.Unix(1234, 0), + CredentialFetchEndTime: time.Unix(1434, 0), + SignStartTime: time.Unix(1234, 0), + SignEndTime: time.Unix(1434, 0), + DeserializeStartTime: time.Unix(1234, 0), + DeserializeEndTime: time.Unix(1434, 0), + RetryDelay: 100, + ResponseContentLength: 100, + StatusCode: 200, + 
RequestID: "reqid", + ExtendedRequestID: "exreqid", + HTTPClient: "Default", + MaxConcurrency: 10, + PendingConnectionAcquires: 1, + AvailableConcurrency: 2, + ActiveRequests: 3, + ReusedConnection: false, + }, + { + ServiceCallStart: time.Unix(1234, 0), + ServiceCallEnd: time.Unix(1434, 0), + FirstByteTime: time.Unix(1334, 0), + ConnRequestedTime: time.Unix(1234, 0), + ConnObtainedTime: time.Unix(1434, 0), + CredentialFetchStartTime: time.Unix(1234, 0), + CredentialFetchEndTime: time.Unix(1434, 0), + SignStartTime: time.Unix(1234, 0), + SignEndTime: time.Unix(1434, 0), + DeserializeStartTime: time.Unix(1234, 0), + DeserializeEndTime: time.Unix(1434, 0), + RetryDelay: 100, + ResponseContentLength: 100, + StatusCode: 200, + RequestID: "reqid", + ExtendedRequestID: "exreqid", + HTTPClient: "Default", + MaxConcurrency: 10, + PendingConnectionAcquires: 1, + AvailableConcurrency: 2, + ActiveRequests: 3, + ReusedConnection: false, + }}, + } + + data.ComputeRequestMetrics() + + expectedAPICallDuration := time.Second * 200 + actualAPICallDuration := data.APICallDuration + + if expectedAPICallDuration != actualAPICallDuration { + t.Errorf("Unexpected ApiCallDuration, should be '%s' but was '%s'", expectedAPICallDuration, actualAPICallDuration) + } + + expectedMarshallingDuration := time.Second * 200 + actualMarshallingDuration := data.MarshallingDuration + + if expectedMarshallingDuration != actualMarshallingDuration { + t.Errorf("Unexpected MarshallingDuration, should be '%s' but was '%s'", expectedMarshallingDuration, actualMarshallingDuration) + } + + expectedEndpointResolutionDuration := time.Second * 200 + actualEndpointResolutionDuration := data.EndpointResolutionDuration + + if expectedEndpointResolutionDuration != actualEndpointResolutionDuration { + t.Errorf("Unexpected EndpointResolutionDuration, should be '%s' but was '%s'", expectedEndpointResolutionDuration, actualEndpointResolutionDuration) + } + + for idx := range data.Attempts { + + attempt := data.Attempts[idx] + + expectedServiceCallDuration := time.Second * 200 + actualServiceCallDuration := attempt.ServiceCallDuration + + if expectedServiceCallDuration != actualServiceCallDuration { + t.Errorf("Unexpected ServiceCallDuration, should be '%s' but was '%s'", expectedServiceCallDuration, actualServiceCallDuration) + } + + expectedTimeToFirstByte := time.Second * 100 + actualTimeToFirstByte := attempt.TimeToFirstByte + + if expectedTimeToFirstByte != actualTimeToFirstByte { + t.Errorf("Unexpected TimeToFirstByte, should be '%s' but was '%s'", expectedTimeToFirstByte, actualTimeToFirstByte) + } + + expectedConcurrencyAcquireDuration := time.Second * 200 + actualConcurrencyAcquireDuration := attempt.ConcurrencyAcquireDuration + + if expectedConcurrencyAcquireDuration != actualConcurrencyAcquireDuration { + t.Errorf("Unexpected ConcurrencyAcquireDuration, should be '%s' but was '%s'", expectedConcurrencyAcquireDuration, actualConcurrencyAcquireDuration) + } + + expectedSigningDuration := time.Second * 200 + actualSigningDuration := attempt.SigningDuration + + if expectedSigningDuration != actualSigningDuration { + t.Errorf("Unexpected SigningDuration, should be '%s' but was '%s'", expectedSigningDuration, actualSigningDuration) + } + + expectedUnMarshallingDuration := time.Second * 200 + actualUnMarshallingDuration := attempt.UnMarshallingDuration + + if expectedUnMarshallingDuration != actualUnMarshallingDuration { + t.Errorf("Unexpected UnMarshallingDuration, should be '%s' but was '%s'", expectedUnMarshallingDuration, 
actualUnMarshallingDuration) + } + } +} diff --git a/aws/middleware/private/metrics/middleware/configuration.go b/aws/middleware/private/metrics/middleware/configuration.go new file mode 100644 index 00000000000..2bfe2543aa6 --- /dev/null +++ b/aws/middleware/private/metrics/middleware/configuration.go @@ -0,0 +1,52 @@ +package middleware + +import ( + "github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics" + "github.com/aws/smithy-go/middleware" + "net/http" +) + +func WithMetricMiddlewares( + publisher metrics.MetricPublisher, client *http.Client, +) func(stack *middleware.Stack) error { + connectionCounter := &metrics.SharedConnectionCounter{} + return func(stack *middleware.Stack) error { + if err := stack.Initialize.Add(GetSetupMetricCollectionMiddleware(connectionCounter, publisher), middleware.Before); err != nil { + return err + } + if err := stack.Serialize.Add(GetRecordStackSerializeStartMiddleware(), middleware.Before); err != nil { + return err + } + if err := stack.Serialize.Add(GetRecordStackSerializeEndMiddleware(), middleware.After); err != nil { + return err + } + if err := stack.Serialize.Insert(GetRecordEndpointResolutionStartMiddleware(), "ResolveEndpoint", middleware.Before); err != nil { + return err + } + if err := stack.Serialize.Insert(GetRecordEndpointResolutionEndMiddleware(), "ResolveEndpoint", middleware.After); err != nil { + return err + } + if err := stack.Build.Add(GetWrapDataStreamMiddleware(), middleware.After); err != nil { + return err + } + if err := stack.Finalize.Add(GetRegisterRequestMetricContextMiddleware(), middleware.Before); err != nil { + return err + } + if err := stack.Finalize.Insert(GetRegisterAttemptMetricContextMiddleware(), "Retry", middleware.After); err != nil { + return err + } + if err := stack.Finalize.Add(GetHttpMetricMiddleware(client), middleware.After); err != nil { + return err + } + if err := stack.Deserialize.Add(GetRecordStackDeserializeStartMiddleware(), middleware.After); err != nil { + return err + } + if err := stack.Deserialize.Add(GetRecordStackDeserializeEndMiddleware(), middleware.Before); err != nil { + return err + } + if err := stack.Deserialize.Insert(GetTransportMetricsMiddleware(), "StackDeserializeStart", middleware.After); err != nil { + return err + } + return nil + } +} diff --git a/aws/middleware/private/metrics/middleware/endpoint_resolution_end.go b/aws/middleware/private/metrics/middleware/endpoint_resolution_end.go new file mode 100644 index 00000000000..83c1867cfc2 --- /dev/null +++ b/aws/middleware/private/metrics/middleware/endpoint_resolution_end.go @@ -0,0 +1,36 @@ +// This package is designated as private and is intended for use only by the +// smithy client runtime. The exported API therein is not considered stable and +// is subject to breaking changes without notice. 
+ +package middleware + +import ( + "context" + "github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics" + "github.com/aws/aws-sdk-go-v2/internal/sdk" + "github.com/aws/smithy-go/middleware" +) + +type EndpointResolutionEnd struct{} + +func GetRecordEndpointResolutionEndMiddleware() *EndpointResolutionEnd { + return &EndpointResolutionEnd{} +} + +func (m *EndpointResolutionEnd) ID() string { + return "EndpointResolutionEnd" +} + +func (m *EndpointResolutionEnd) HandleSerialize( + ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, +) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + + mctx := metrics.Context(ctx) + mctx.Data().ResolveEndpointEndTime = sdk.NowTime() + + out, metadata, err = next.HandleSerialize(ctx, in) + + return out, metadata, err +} diff --git a/aws/middleware/private/metrics/middleware/endpoint_resolution_end_test.go b/aws/middleware/private/metrics/middleware/endpoint_resolution_end_test.go new file mode 100644 index 00000000000..6a82b5eda3f --- /dev/null +++ b/aws/middleware/private/metrics/middleware/endpoint_resolution_end_test.go @@ -0,0 +1,32 @@ +// This package is designated as private and is intended for use only by the +// smithy client runtime. The exported API therein is not considered stable and +// is subject to breaking changes without notice. + +package middleware + +import ( + "context" + "github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics" + "github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics/testutils" + "github.com/aws/aws-sdk-go-v2/internal/sdk" + "github.com/aws/smithy-go/middleware" + "testing" + "time" +) + +func TestEndpointResolutionEnd_HandleSerialize(t *testing.T) { + + sdk.NowTime = func() time.Time { + return time.Unix(1234, 0) + } + + ctx := metrics.InitMetricContext(context.TODO(), &metrics.SharedConnectionCounter{}, &testutils.NoopPublisher{}) + mw := GetRecordEndpointResolutionEndMiddleware() + _, _, _ = mw.HandleSerialize(ctx, middleware.SerializeInput{}, testutils.NoopSerializeHandler{}) + + actualTime := metrics.Context(ctx).Data().ResolveEndpointEndTime + expectedTime := sdk.NowTime() + if actualTime != expectedTime { + t.Errorf("Unexpected ResolveEndpointEndTime, should be '%s' but was '%s'", expectedTime, actualTime) + } +} diff --git a/aws/middleware/private/metrics/middleware/endpoint_resolution_start.go b/aws/middleware/private/metrics/middleware/endpoint_resolution_start.go new file mode 100644 index 00000000000..54c3edf4fb3 --- /dev/null +++ b/aws/middleware/private/metrics/middleware/endpoint_resolution_start.go @@ -0,0 +1,36 @@ +// This package is designated as private and is intended for use only by the +// smithy client runtime. The exported API therein is not considered stable and +// is subject to breaking changes without notice. 
+ +package middleware + +import ( + "context" + "github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics" + "github.com/aws/aws-sdk-go-v2/internal/sdk" + "github.com/aws/smithy-go/middleware" +) + +type EndpointResolutionStart struct{} + +func GetRecordEndpointResolutionStartMiddleware() *EndpointResolutionStart { + return &EndpointResolutionStart{} +} + +func (m *EndpointResolutionStart) ID() string { + return "EndpointResolutionStart" +} + +func (m *EndpointResolutionStart) HandleSerialize( + ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, +) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + + mctx := metrics.Context(ctx) + mctx.Data().ResolveEndpointStartTime = sdk.NowTime() + + out, metadata, err = next.HandleSerialize(ctx, in) + + return out, metadata, err +} diff --git a/aws/middleware/private/metrics/middleware/endpoint_resolution_start_test.go b/aws/middleware/private/metrics/middleware/endpoint_resolution_start_test.go new file mode 100644 index 00000000000..5867825c0b3 --- /dev/null +++ b/aws/middleware/private/metrics/middleware/endpoint_resolution_start_test.go @@ -0,0 +1,51 @@ +// This package is designated as private and is intended for use only by the +// smithy client runtime. The exported API therein is not considered stable and +// is subject to breaking changes without notice. + +package middleware + +import ( + "context" + "github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics" + "github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics/testutils" + "github.com/aws/aws-sdk-go-v2/internal/sdk" + "github.com/aws/smithy-go/middleware" + "testing" + "time" +) + +func TestEndpointResolutionStart_HandleSerialize_Success(t *testing.T) { + + sdk.NowTime = func() time.Time { + return time.Unix(1234, 0) + } + + ctx := context.TODO() + ctx = metrics.InitMetricContext(ctx, &metrics.SharedConnectionCounter{}, &testutils.NoopPublisher{}) + mw := GetRecordEndpointResolutionStartMiddleware() + _, _, _ = mw.HandleSerialize(ctx, middleware.SerializeInput{}, testutils.NoopSerializeHandler{}) + + actualTime := metrics.Context(ctx).Data().ResolveEndpointStartTime + expectedTime := sdk.NowTime() + if actualTime != expectedTime { + t.Errorf("Unexpected ResolveEndpointStartTime, should be '%s' but was '%s'", expectedTime, actualTime) + } +} + +func TestEndpointResolutionStart_HandleSerialize_Error(t *testing.T) { + + sdk.NowTime = func() time.Time { + return time.Unix(1234, 0) + } + + ctx := metrics.InitMetricContext(context.TODO(), &metrics.SharedConnectionCounter{}, &testutils.NoopPublisher{}) + + mw := GetRecordEndpointResolutionStartMiddleware() + _, _, _ = mw.HandleSerialize(ctx, middleware.SerializeInput{}, testutils.NoopSerializeHandler{}) + + actualTime := metrics.Context(ctx).Data().ResolveEndpointStartTime + expectedTime := sdk.NowTime() + if actualTime != expectedTime { + t.Errorf("Unexpected ResolveEndpointStartTime, should be '%s' but was '%s'", expectedTime, actualTime) + } +} diff --git a/aws/middleware/private/metrics/middleware/http.go b/aws/middleware/private/metrics/middleware/http.go new file mode 100644 index 00000000000..95cbffd96dc --- /dev/null +++ b/aws/middleware/private/metrics/middleware/http.go @@ -0,0 +1,154 @@ +// This package is designated as private and is intended for use only by the +// smithy client runtime. The exported API therein is not considered stable and +// is subject to breaking changes without notice. 
+ +package middleware + +import ( + "context" + "fmt" + "net/http" + "net/http/httptrace" + "reflect" + "time" + + "github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics" + "github.com/aws/aws-sdk-go-v2/internal/sdk" + "github.com/aws/smithy-go/middleware" +) + +const ( + idleConnFieldName = "idleConn" + addressFieldName = "addr" + unkHttpClientName = "Other" + defaultHttpClientName = "Default" +) + +type HTTPMetrics struct { + client *http.Client +} + +func GetHttpMetricMiddleware(client *http.Client) *HTTPMetrics { + return &HTTPMetrics{ + client: client, + } +} + +func (m *HTTPMetrics) ID() string { + return "HTTPMetrics" +} + +func (m *HTTPMetrics) HandleFinalize( + ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, +) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, attemptError error, +) { + ctx = m.addTraceContext(ctx) + finalize, metadata, err := next.HandleFinalize(ctx, in) + return finalize, metadata, err +} + +var addClientTrace = func(ctx context.Context, trace *httptrace.ClientTrace) context.Context { + return httptrace.WithClientTrace(ctx, trace) +} + +func (m *HTTPMetrics) addTraceContext(ctx context.Context) context.Context { + mctx := metrics.Context(ctx) + counter := mctx.ConnectionCounter() + + attemptMetrics, attemptErr := mctx.Data().LatestAttempt() + + if attemptErr == nil { + trace := &httptrace.ClientTrace{ + GotFirstResponseByte: func() { + gotFirstResponseByte(attemptMetrics, sdk.NowTime()) + }, + GetConn: func(hostPort string) { + getConn(attemptMetrics, counter, sdk.NowTime(), m.client, hostPort) + }, + GotConn: func(info httptrace.GotConnInfo) { + gotConn(attemptMetrics, counter, info, time.Now()) + }, + } + + ctx = addClientTrace(ctx, trace) + } else { + fmt.Println(attemptErr) + } + return ctx +} + +func gotFirstResponseByte(attemptMetrics *metrics.AttemptMetrics, now time.Time) { + attemptMetrics.FirstByteTime = now +} + +func getConn( + attemptMetrics *metrics.AttemptMetrics, counter *metrics.SharedConnectionCounter, now time.Time, client *http.Client, hostPort string, +) { + attemptMetrics.ConnRequestedTime = now + attemptMetrics.PendingConnectionAcquires = int(counter.PendingConnectionAcquire()) + attemptMetrics.ActiveRequests = int(counter.ActiveRequests()) + + // Adding HTTP client metrics here since we need the hostPort to identify the correct conn queues. 
+ addHTTPClientMetrics(attemptMetrics, client, hostPort) + counter.AddPendingConnectionAcquire() +} + +func gotConn( + attemptMetrics *metrics.AttemptMetrics, counter *metrics.SharedConnectionCounter, info httptrace.GotConnInfo, now time.Time, +) { + attemptMetrics.ReusedConnection = info.Reused + attemptMetrics.ConnObtainedTime = now + counter.RemovePendingConnectionAcquire() +} + +func addHTTPClientMetrics(attemptMetrics *metrics.AttemptMetrics, client *http.Client, hostPort string) { + + maxConnsPerHost := -1 + idleConnCountPerHost := -1 + httpClient := unkHttpClientName + + clientInterface := interface{}(client) + + switch clientInterface.(type) { + // If not a standard HTTP client we cannot retrieve these metrics + case *http.Client: + transport := clientInterface.(*http.Client).Transport + httpClient = defaultHttpClientName + switch transport.(type) { + case *http.Transport: + + maxConnsPerHost = transport.(*http.Transport).MaxConnsPerHost + + transportPtr := reflect.ValueOf(transport.(*http.Transport)) + + if transportPtr.IsValid() && transportPtr.Kind() == reflect.Pointer { + + transportValue := transportPtr.Elem() + idleConn := transportValue.FieldByName(idleConnFieldName) + + if idleConn.IsValid() && idleConn.Kind() == reflect.Map { + + IdleConnMap := idleConn.MapRange() + // We iterate through all the connection queues to look for the target host + for IdleConnMap.Next() { + address := IdleConnMap.Key().FieldByName(addressFieldName) + + if address.IsValid() && address.Kind() == reflect.String { + + if address.String() == hostPort { + // Number of idle connections for the requests host + idleConnCountPerHost = IdleConnMap.Value().Len() + break + } + } + } + } + } + } + } + + attemptMetrics.HTTPClient = httpClient + attemptMetrics.AvailableConcurrency = idleConnCountPerHost + attemptMetrics.MaxConcurrency = maxConnsPerHost +} diff --git a/aws/middleware/private/metrics/middleware/http_test.go b/aws/middleware/private/metrics/middleware/http_test.go new file mode 100644 index 00000000000..15d7caa2036 --- /dev/null +++ b/aws/middleware/private/metrics/middleware/http_test.go @@ -0,0 +1,146 @@ +// This package is designated as private and is intended for use only by the +// smithy client runtime. The exported API therein is not considered stable and +// is subject to breaking changes without notice. 
+ +package middleware + +import ( + "context" + "net/http" + "net/http/httptrace" + "testing" + "time" + + "github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics" + "github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics/testutils" + "github.com/aws/aws-sdk-go-v2/internal/sdk" + "github.com/aws/smithy-go/middleware" +) + +func TestHTTPMetrics_HandleFinalizes(t *testing.T) { + + sdk.NowTime = func() time.Time { + return time.Unix(1234, 0) + } + + counter := metrics.SharedConnectionCounter{} + + client := http.Client{ + Transport: &http.Transport{ + MaxIdleConnsPerHost: 10, + }, + } + + ctx := metrics.InitMetricContext(context.TODO(), &counter, &testutils.NoopPublisher{}) + + mw := GetHttpMetricMiddleware(&client) + + mctx := metrics.Context(ctx) + mctx.Data().NewAttempt() + + var traceInput *httptrace.ClientTrace + + addClientTrace = func(ctx context.Context, trace *httptrace.ClientTrace) context.Context { + traceInput = trace + return ctx + } + + _, _, _ = mw.HandleFinalize(ctx, middleware.FinalizeInput{}, testutils.NoopFinalizeHandler{}) + + if traceInput == nil { + t.Fatal("trace should be added to context") + } +} + +func TestHTTPMetrics_HandleFinalizes_AttemptErr(t *testing.T) { + + sdk.NowTime = func() time.Time { + return time.Unix(1234, 0) + } + + counter := metrics.SharedConnectionCounter{} + + client := http.Client{ + Transport: &http.Transport{ + MaxIdleConnsPerHost: 10, + }, + } + + ctx := metrics.InitMetricContext(context.TODO(), &counter, &testutils.NoopPublisher{}) + + mw := GetHttpMetricMiddleware(&client) + + var traceInput *httptrace.ClientTrace + + addClientTrace = func(ctx context.Context, trace *httptrace.ClientTrace) context.Context { + traceInput = trace + return ctx + } + + _, _, _ = mw.HandleFinalize(ctx, middleware.FinalizeInput{}, testutils.NoopFinalizeHandler{}) + + if traceInput != nil { + t.Fatal("trace should not be added to context") + } + +} + +func TestHTTPMetrics_callbacks(t *testing.T) { + + counter := metrics.SharedConnectionCounter{} + + client := http.Client{ + Transport: &http.Transport{ + MaxIdleConnsPerHost: 10, + }, + } + + ctx := context.TODO() + ctx = metrics.InitMetricContext(ctx, &counter, &testutils.NoopPublisher{}) + mctx := metrics.Context(ctx) + mctx.Data().NewAttempt() + attempt, _ := mctx.Data().LatestAttempt() + + counter.AddPendingConnectionAcquire() + counter.AddActiveRequest() + + gotFirstResponseByte(attempt, sdk.NowTime()) + + getConn(attempt, &counter, sdk.NowTime(), &client, "hostPort") + + gotConn(attempt, &counter, httptrace.GotConnInfo{ + Conn: nil, + Reused: false, + WasIdle: false, + IdleTime: 0, + }, sdk.NowTime()) + + actualConnRequestedTime := attempt.ConnRequestedTime + expectedConnRequestedTime := sdk.NowTime() + + if actualConnRequestedTime != expectedConnRequestedTime { + t.Errorf("Unexpected ConnRequestedTime, should be '%s' but was '%s'", expectedConnRequestedTime, actualConnRequestedTime) + } + + actualPendingConnectionAcquires := attempt.PendingConnectionAcquires + expectedPendingConnectionAcquires := 1 + + if actualPendingConnectionAcquires != expectedPendingConnectionAcquires { + t.Errorf("Unexpected PendingConnectionAcquires, should be '%d' but was '%d'", expectedPendingConnectionAcquires, actualPendingConnectionAcquires) + } + + actualActiveRequests := attempt.ActiveRequests + expectedActiveRequests := 1 + + if actualActiveRequests != expectedActiveRequests { + t.Errorf("Unexpected ActiveRequests, should be '%d' but was '%d'", expectedActiveRequests, actualActiveRequests) + } + + 
actualConnObtainedTime := attempt.ConnObtainedTime
+	expectedConnObtainedTime := sdk.NowTime()
+
+	if actualConnObtainedTime != expectedConnObtainedTime {
+		t.Errorf("Unexpected ConnObtainedTime, should be '%s' but was '%s'", expectedConnObtainedTime, actualConnObtainedTime)
+	}
+
+}
diff --git a/aws/middleware/private/metrics/middleware/metric_collection.go b/aws/middleware/private/metrics/middleware/metric_collection.go
new file mode 100644
index 00000000000..c553d1a114a
--- /dev/null
+++ b/aws/middleware/private/metrics/middleware/metric_collection.go
@@ -0,0 +1,64 @@
+// This package is designated as private and is intended for use only by the
+// smithy client runtime. The exported API therein is not considered stable and
+// is subject to breaking changes without notice.
+
+package middleware
+
+import (
+	"context"
+	"fmt"
+	"github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics"
+	"github.com/aws/aws-sdk-go-v2/internal/sdk"
+	"github.com/aws/smithy-go/middleware"
+)
+
+type MetricCollection struct {
+	cc        *metrics.SharedConnectionCounter
+	publisher metrics.MetricPublisher
+}
+
+func GetSetupMetricCollectionMiddleware(
+	counter *metrics.SharedConnectionCounter, publisher metrics.MetricPublisher,
+) *MetricCollection {
+	return &MetricCollection{
+		cc:        counter,
+		publisher: publisher,
+	}
+}
+
+func (m *MetricCollection) ID() string {
+	return "MetricCollection"
+}
+
+func (m *MetricCollection) HandleInitialize(
+	ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler,
+) (
+	out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+
+	ctx = metrics.InitMetricContext(ctx, m.cc, m.publisher)
+
+	mctx := metrics.Context(ctx)
+	metricData := mctx.Data()
+
+	metricData.RequestStartTime = sdk.NowTime()
+
+	out, metadata, err = next.HandleInitialize(ctx, in)
+
+	metricData.RequestEndTime = sdk.NowTime()
+
+	if err == nil {
+		metricData.Success = 1
+	} else {
+		metricData.Success = 0
+	}
+
+	metricData.ComputeRequestMetrics()
+
+	publishErr := m.publisher.PostRequestMetrics(metrics.Context(ctx).Data())
+	if publishErr != nil {
+		fmt.Println("Failed to post request metrics")
+	}
+
+	return out, metadata, err
+}
diff --git a/aws/middleware/private/metrics/middleware/metric_collection_test.go b/aws/middleware/private/metrics/middleware/metric_collection_test.go
new file mode 100644
index 00000000000..7f28ad38242
--- /dev/null
+++ b/aws/middleware/private/metrics/middleware/metric_collection_test.go
@@ -0,0 +1,89 @@
+// This package is designated as private and is intended for use only by the
+// smithy client runtime. The exported API therein is not considered stable and
+// is subject to breaking changes without notice.
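MetricCollection above is the entry point of this package: it seeds the metric context on the Initialize step and publishes the collected data when the request finishes, so every other middleware here assumes it has already run. The patch does not show where the generated clients register these middlewares, so the sketch below (including the helper name addMetricMiddlewares) is only an illustration built on the public smithy-go stack API; the Before/After placements are assumptions, not the SDK's actual ordering.

// Hypothetical helper, not part of the patch: one way to attach the metric
// middlewares from this package to a smithy stack.
package middleware

import (
	"net/http"

	"github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics"
	"github.com/aws/smithy-go/middleware"
)

func addMetricMiddlewares(
	stack *middleware.Stack,
	counter *metrics.SharedConnectionCounter,
	publisher metrics.MetricPublisher,
	client *http.Client,
) error {
	// MetricCollection must run first: it creates the metric context that the
	// other middlewares read via metrics.Context(ctx).
	if err := stack.Initialize.Add(GetSetupMetricCollectionMiddleware(counter, publisher), middleware.Before); err != nil {
		return err
	}
	if err := stack.Finalize.Add(GetRegisterRequestMetricContextMiddleware(), middleware.Before); err != nil {
		return err
	}
	if err := stack.Finalize.Add(GetRegisterAttemptMetricContextMiddleware(), middleware.After); err != nil {
		return err
	}
	if err := stack.Finalize.Add(GetHttpMetricMiddleware(client), middleware.After); err != nil {
		return err
	}
	// The serialize, deserialize, and build middlewares from this package would
	// be registered on their corresponding steps in the same way.
	return stack.Deserialize.Add(GetTransportMetricsMiddleware(), middleware.After)
}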
+ +package middleware + +import ( + "context" + "testing" + "time" + + "github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics" + "github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics/testutils" + "github.com/aws/aws-sdk-go-v2/internal/sdk" + "github.com/aws/smithy-go/middleware" +) + +func TestGetSetupMetricCollectionMiddleware(t *testing.T) { + + sdk.NowTime = func() time.Time { + return time.Unix(1234, 0) + } + + cases := map[string]struct { + ConnectionCounter *metrics.SharedConnectionCounter + Publisher metrics.MetricPublisher + Input middleware.InitializeInput + Handler middleware.InitializeHandler + ExpectedStartTime time.Time + ExpectedEndTime time.Time + ExpectedSuccess uint8 + }{ + "success": { + ConnectionCounter: &metrics.SharedConnectionCounter{}, + Publisher: &testutils.NoopPublisher{}, + Input: middleware.InitializeInput{}, + Handler: testutils.NoopInitializeHandler{}, + ExpectedStartTime: time.Unix(1234, 0), + ExpectedEndTime: time.Unix(1234, 0), + ExpectedSuccess: 1, + }, + "!success": { + ConnectionCounter: &metrics.SharedConnectionCounter{}, + Publisher: &testutils.NoopPublisher{}, + Input: middleware.InitializeInput{}, + Handler: testutils.ErrorInitializeHandler{}, + ExpectedStartTime: time.Unix(1234, 0), + ExpectedEndTime: time.Unix(1234, 0), + ExpectedSuccess: 0, + }, + "publisherFailure": { + ConnectionCounter: &metrics.SharedConnectionCounter{}, + Publisher: &testutils.ErrorPublisher{}, + Input: middleware.InitializeInput{}, + Handler: testutils.NoopInitializeHandler{}, + ExpectedStartTime: time.Unix(1234, 0), + ExpectedEndTime: time.Unix(1234, 0), + ExpectedSuccess: 1, + }, + } + + for name, c := range cases { + t.Run(name, func(t *testing.T) { + + ctx := metrics.InitMetricContext(context.TODO(), c.ConnectionCounter, c.Publisher) + + mw := GetSetupMetricCollectionMiddleware(c.ConnectionCounter, c.Publisher) + + _, _, _ = mw.HandleInitialize(ctx, c.Input, c.Handler) + + mctx := metrics.Context(ctx) + + actualRequestStartTime := mctx.Data().RequestStartTime + actualRequestEndTime := mctx.Data().RequestEndTime + + if actualRequestStartTime != c.ExpectedStartTime { + t.Errorf("Unexpected RequestStartTime, should be '%s' but was '%s'", c.ExpectedStartTime, actualRequestStartTime) + } + if actualRequestEndTime != c.ExpectedEndTime { + t.Errorf("Unexpected RequestEndTime, should be '%s' but was '%s'", c.ExpectedEndTime, actualRequestEndTime) + } + if mctx.Data().Success != c.ExpectedSuccess { + t.Errorf("Unexpected Success status, should be '%d' but was '%d'", c.ExpectedSuccess, mctx.Data().Success) + } + + }) + } + +} diff --git a/aws/middleware/private/metrics/middleware/request.go b/aws/middleware/private/metrics/middleware/request.go new file mode 100644 index 00000000000..983fa9c5d58 --- /dev/null +++ b/aws/middleware/private/metrics/middleware/request.go @@ -0,0 +1,61 @@ +// This package is designated as private and is intended for use only by the +// smithy client runtime. The exported API therein is not considered stable and +// is subject to breaking changes without notice. 
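The table-driven test above pins sdk.NowTime to a fixed instant so that RequestStartTime and RequestEndTime are deterministic, but it never restores the real clock, so the override leaks into whatever test runs next in the package. A minimal sketch of the save-and-restore idiom follows; it assumes only that sdk.NowTime is the package-level func() time.Time variable the tests already assign to, and the test name is hypothetical.

// Sketch, not part of the patch: freeze the SDK clock for one test and put it
// back on exit so later tests see the real time again.
package middleware

import (
	"testing"
	"time"

	"github.com/aws/aws-sdk-go-v2/internal/sdk"
)

func TestWithFrozenClock(t *testing.T) {
	original := sdk.NowTime
	defer func() { sdk.NowTime = original }()

	sdk.NowTime = func() time.Time { return time.Unix(1234, 0) }

	// ... exercise the middleware under test here; every sdk.NowTime() call
	// now returns the frozen instant ...
	if got := sdk.NowTime(); !got.Equal(time.Unix(1234, 0)) {
		t.Fatalf("expected frozen clock, got %v", got)
	}
}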
+ +package middleware + +import ( + "context" + + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +const ( + clientRequestIdKey = "Amz-Sdk-Invocation-Id" + unkClientId = "unk" +) + +type RegisterMetricContext struct{} + +func GetRegisterRequestMetricContextMiddleware() *RegisterMetricContext { + return &RegisterMetricContext{} +} + +func (m *RegisterMetricContext) ID() string { + return "RegisterMetricContext" +} + +func (m *RegisterMetricContext) HandleFinalize( + ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, +) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + + mctx := metrics.Context(ctx) + metricData := mctx.Data() + + metricData.ServiceID = awsmiddleware.GetServiceID(ctx) + metricData.OperationName = awsmiddleware.GetOperationName(ctx) + metricData.PartitionID = awsmiddleware.GetPartitionID(ctx) + metricData.Region = awsmiddleware.GetSigningRegion(ctx) + + switch req := in.Request.(type) { + case *smithyhttp.Request: + crid := req.Header.Get(clientRequestIdKey) + if len(crid) == 0 { + crid = unkClientId + } + metricData.ClientRequestID = crid + metricData.RequestContentLength = req.ContentLength + default: + metricData.ClientRequestID = unkClientId + metricData.RequestContentLength = -1 + } + + out, metadata, err = next.HandleFinalize(ctx, in) + + return out, metadata, err +} diff --git a/aws/middleware/private/metrics/middleware/request_attempt.go b/aws/middleware/private/metrics/middleware/request_attempt.go new file mode 100644 index 00000000000..a8a9e10c14c --- /dev/null +++ b/aws/middleware/private/metrics/middleware/request_attempt.go @@ -0,0 +1,70 @@ +// This package is designated as private and is intended for use only by the +// smithy client runtime. The exported API therein is not considered stable and +// is subject to breaking changes without notice. 
+ +package middleware + +import ( + "context" + + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/transport/http" +) + +const ( + amznRequestIdKey = "X-Amz-Request-Id" + amznRequestId2Key = "X-Amz-Id-2" + unkAmznReqId = "unk" + unkAmznReqId2 = "unk" +) + +type RegisterAttemptMetricContext struct{} + +func GetRegisterAttemptMetricContextMiddleware() *RegisterAttemptMetricContext { + return &RegisterAttemptMetricContext{} +} + +func (m *RegisterAttemptMetricContext) ID() string { + return "RegisterAttemptMetricContext" +} + +var getRawResponse = func(metadata middleware.Metadata) *http.Response { + switch res := awsmiddleware.GetRawResponse(metadata).(type) { + case *http.Response: + return res + default: + return nil + } +} + +func (m *RegisterAttemptMetricContext) HandleFinalize( + ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, +) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + + mctx := metrics.Context(ctx) + mctx.Data().NewAttempt() + + out, metadata, err = next.HandleFinalize(ctx, in) + + res := getRawResponse(metadata) + + attemptMetrics, _ := mctx.Data().LatestAttempt() + + if res != nil { + attemptMetrics.RequestID = res.Header.Get(amznRequestIdKey) + attemptMetrics.ExtendedRequestID = res.Header.Get(amznRequestId2Key) + attemptMetrics.StatusCode = res.StatusCode + attemptMetrics.ResponseContentLength = res.ContentLength + } else { + attemptMetrics.RequestID = unkAmznReqId + attemptMetrics.ExtendedRequestID = unkAmznReqId2 + attemptMetrics.StatusCode = -1 + attemptMetrics.ResponseContentLength = -1 + } + + return out, metadata, err +} diff --git a/aws/middleware/private/metrics/middleware/request_attempt_test.go b/aws/middleware/private/metrics/middleware/request_attempt_test.go new file mode 100644 index 00000000000..96408760a9b --- /dev/null +++ b/aws/middleware/private/metrics/middleware/request_attempt_test.go @@ -0,0 +1,114 @@ +// This package is designated as private and is intended for use only by the +// smithy client runtime. The exported API therein is not considered stable and +// is subject to breaking changes without notice. 
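getRawResponse above is deliberately a package-level variable so the table-driven test that follows can swap in canned responses. Like the clock override noted earlier, it is shared package state, so a test that replaces it should restore the original; the sketch below shows that clean-up, assuming only the hook and middleware defined above (the test name is hypothetical).

// Sketch, not part of the patch: stub the raw-response hook for a single test
// and restore it afterwards.
package middleware

import (
	"testing"

	smithymiddleware "github.com/aws/smithy-go/middleware"
	smithyhttp "github.com/aws/smithy-go/transport/http"
)

func TestWithStubbedRawResponse(t *testing.T) {
	original := getRawResponse
	defer func() { getRawResponse = original }()

	// Simulate an attempt that produced no usable response metadata; the
	// middleware should then fall back to its "unk" / -1 defaults.
	getRawResponse = func(metadata smithymiddleware.Metadata) *smithyhttp.Response {
		return nil
	}

	mw := GetRegisterAttemptMetricContextMiddleware()
	if mw.ID() != "RegisterAttemptMetricContext" {
		t.Fatalf("unexpected middleware ID %q", mw.ID())
	}
}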
+ +package middleware + +import ( + "context" + "net/http" + "testing" + "time" + + "github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics" + "github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics/testutils" + "github.com/aws/aws-sdk-go-v2/internal/sdk" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +func TestRegisterAttemptMetricContext_HandleFinalize(t *testing.T) { + + sdk.NowTime = func() time.Time { + return time.Unix(1234, 0) + } + + cases := map[string]struct { + ConnectionCounter *metrics.SharedConnectionCounter + Publisher metrics.MetricPublisher + ProvideResponse func(metadata middleware.Metadata) *smithyhttp.Response + Handler middleware.FinalizeHandler + Input middleware.FinalizeInput + ExpectedRequestId string + ExpectedExtendedRequestId string + ExpectedStatusCode int + ExpectedResponseContentLength int64 + }{ + "success": { + ConnectionCounter: &metrics.SharedConnectionCounter{}, + Publisher: &testutils.NoopPublisher{}, + ProvideResponse: func(metadata middleware.Metadata) *smithyhttp.Response { + res := smithyhttp.Response{} + res.Response = &http.Response{ + StatusCode: 400, + ContentLength: 1234, + Header: map[string][]string{}, + } + res.Header.Set(amznRequestIdKey, "reqId") + res.Header.Set(amznRequestId2Key, "reqId2") + return &res + }, + Handler: testutils.NoopFinalizeHandler{}, + Input: middleware.FinalizeInput{}, + ExpectedRequestId: "reqId", + ExpectedExtendedRequestId: "reqId2", + ExpectedStatusCode: 400, + ExpectedResponseContentLength: 1234, + }, + "noResInfo": { + ConnectionCounter: &metrics.SharedConnectionCounter{}, + Publisher: &testutils.NoopPublisher{}, + ProvideResponse: func(metadata middleware.Metadata) *smithyhttp.Response { + return nil + }, + Handler: testutils.NoopFinalizeHandler{}, + ExpectedRequestId: unkAmznReqId, + ExpectedExtendedRequestId: unkAmznReqId2, + ExpectedStatusCode: -1, + ExpectedResponseContentLength: -1, + }, + "noMetadata": { + ConnectionCounter: &metrics.SharedConnectionCounter{}, + Publisher: &testutils.NoopPublisher{}, + ProvideResponse: getRawResponse, + Handler: testutils.NoopFinalizeHandler{}, + ExpectedRequestId: unkAmznReqId, + ExpectedExtendedRequestId: unkAmznReqId2, + ExpectedStatusCode: -1, + ExpectedResponseContentLength: -1, + }, + } + + for name, c := range cases { + t.Run(name, func(t *testing.T) { + + getRawResponse = c.ProvideResponse + + ctx := metrics.InitMetricContext(context.TODO(), c.ConnectionCounter, c.Publisher) + + mw := GetRegisterAttemptMetricContextMiddleware() + + _, _, _ = mw.HandleFinalize(ctx, c.Input, c.Handler) + + latestAttempt, _ := metrics.Context(ctx).Data().LatestAttempt() + actualRequestId := latestAttempt.RequestID + actualExtendedRequestId := latestAttempt.ExtendedRequestID + actualStatusCode := latestAttempt.StatusCode + actualResponseContentLength := latestAttempt.ResponseContentLength + + if actualRequestId != c.ExpectedRequestId { + t.Errorf("Unexpected RequestId, should be '%s' but was '%s'", c.ExpectedRequestId, actualRequestId) + } + if actualExtendedRequestId != c.ExpectedExtendedRequestId { + t.Errorf("Unexpected ExtendedRequestId, should be '%s' but was '%s'", c.ExpectedExtendedRequestId, actualExtendedRequestId) + } + if actualStatusCode != c.ExpectedStatusCode { + t.Errorf("Unexpected StatusCode, should be '%d' but was '%d'", c.ExpectedStatusCode, actualStatusCode) + } + if actualResponseContentLength != c.ExpectedResponseContentLength { + t.Errorf("Unexpected StatusCode, should be '%d' but was '%d'", 
c.ExpectedResponseContentLength, actualResponseContentLength) + } + }) + } + +} diff --git a/aws/middleware/private/metrics/middleware/request_test.go b/aws/middleware/private/metrics/middleware/request_test.go new file mode 100644 index 00000000000..84dc4eac9ee --- /dev/null +++ b/aws/middleware/private/metrics/middleware/request_test.go @@ -0,0 +1,86 @@ +// This package is designated as private and is intended for use only by the +// smithy client runtime. The exported API therein is not considered stable and +// is subject to breaking changes without notice. + +package middleware + +import ( + "context" + "testing" + + "github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics" + "github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics/testutils" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +func TestRegisterMetricContext_HandleFinalize(t *testing.T) { + + cases := map[string]struct { + ConnectionCounter *metrics.SharedConnectionCounter + Publisher metrics.MetricPublisher + ProvideInput func() middleware.FinalizeInput + Handler middleware.FinalizeHandler + ExpectedCrId string + }{ + "success": { + ConnectionCounter: &metrics.SharedConnectionCounter{}, + Publisher: &testutils.NoopPublisher{}, + ProvideInput: func() middleware.FinalizeInput { + req := smithyhttp.NewStackRequest().(*smithyhttp.Request) + req.Header.Set(clientRequestIdKey, "crid") + return middleware.FinalizeInput{Request: req} + }, + Handler: testutils.NoopFinalizeHandler{}, + ExpectedCrId: "crid", + }, + "noCrIdHeader": { + ConnectionCounter: &metrics.SharedConnectionCounter{}, + Publisher: &testutils.NoopPublisher{}, + ProvideInput: func() middleware.FinalizeInput { + req := smithyhttp.NewStackRequest().(*smithyhttp.Request) + return middleware.FinalizeInput{Request: req} + }, + Handler: testutils.NoopFinalizeHandler{}, + ExpectedCrId: unkClientId, + }, + "nilRequest": { + ConnectionCounter: &metrics.SharedConnectionCounter{}, + Publisher: &testutils.NoopPublisher{}, + ProvideInput: func() middleware.FinalizeInput { + return middleware.FinalizeInput{Request: nil} + }, + Handler: testutils.NoopFinalizeHandler{}, + ExpectedCrId: unkClientId, + }, + "wrongRequestType": { + ConnectionCounter: &metrics.SharedConnectionCounter{}, + Publisher: &testutils.NoopPublisher{}, + ProvideInput: func() middleware.FinalizeInput { + return middleware.FinalizeInput{Request: "nil"} + }, + Handler: testutils.NoopFinalizeHandler{}, + ExpectedCrId: unkClientId, + }, + } + + for name, c := range cases { + t.Run(name, func(t *testing.T) { + + ctx := metrics.InitMetricContext(context.TODO(), c.ConnectionCounter, c.Publisher) + + mw := GetRegisterRequestMetricContextMiddleware() + + _, _, _ = mw.HandleFinalize(ctx, c.ProvideInput(), c.Handler) + + mctx := metrics.Context(ctx) + actualCrId := mctx.Data().ClientRequestID + + if actualCrId != c.ExpectedCrId { + t.Errorf("Unexpected ClientRequestId, should be '%s' but was '%s'", c.ExpectedCrId, actualCrId) + } + + }) + } + +} diff --git a/aws/middleware/private/metrics/middleware/stack_deserialize_end.go b/aws/middleware/private/metrics/middleware/stack_deserialize_end.go new file mode 100644 index 00000000000..ac9e109f385 --- /dev/null +++ b/aws/middleware/private/metrics/middleware/stack_deserialize_end.go @@ -0,0 +1,45 @@ +// This package is designated as private and is intended for use only by the +// smithy client runtime. The exported API therein is not considered stable and +// is subject to breaking changes without notice. 
+ +package middleware + +import ( + "context" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics" + "github.com/aws/aws-sdk-go-v2/internal/sdk" + "github.com/aws/smithy-go/middleware" +) + +type StackDeserializeEnd struct{} + +func GetRecordStackDeserializeEndMiddleware() *StackDeserializeEnd { + return &StackDeserializeEnd{} +} + +func (m *StackDeserializeEnd) ID() string { + return "StackDeserializeEnd" +} + +func (m *StackDeserializeEnd) HandleDeserialize( + ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler, +) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, attemptErr error, +) { + + out, metadata, err := next.HandleDeserialize(ctx, in) + + mctx := metrics.Context(ctx) + + attemptMetrics, attemptErr := mctx.Data().LatestAttempt() + + if attemptErr != nil { + fmt.Println(attemptErr) + } else { + attemptMetrics.DeserializeEndTime = sdk.NowTime() + } + + return out, metadata, err + +} diff --git a/aws/middleware/private/metrics/middleware/stack_deserialize_end_test.go b/aws/middleware/private/metrics/middleware/stack_deserialize_end_test.go new file mode 100644 index 00000000000..a47d922cbeb --- /dev/null +++ b/aws/middleware/private/metrics/middleware/stack_deserialize_end_test.go @@ -0,0 +1,63 @@ +// This package is designated as private and is intended for use only by the +// smithy client runtime. The exported API therein is not considered stable and +// is subject to breaking changes without notice. + +package middleware + +import ( + "context" + "github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics" + "github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics/testutils" + "github.com/aws/aws-sdk-go-v2/internal/sdk" + "github.com/aws/smithy-go/middleware" + "testing" + "time" +) + +func TestStackDeserializeEnd_HandleDeserializeSuccess(t *testing.T) { + + sdk.NowTime = func() time.Time { + return time.Unix(1234, 0) + } + + ctx := context.TODO() + ctx = metrics.InitMetricContext(ctx, &metrics.SharedConnectionCounter{}, &testutils.NoopPublisher{}) + mw := GetRecordStackDeserializeEndMiddleware() + + data := metrics.Context(ctx).Data() + + data.NewAttempt() + + _, _, _ = mw.HandleDeserialize(ctx, middleware.DeserializeInput{}, testutils.NoopDeserializeHandler{}) + + attempt, _ := data.LatestAttempt() + actualTime := attempt.DeserializeEndTime + expectedTime := sdk.NowTime() + if actualTime != expectedTime { + t.Errorf("Unexpected DeserializeEndTime, should be '%s' but was '%s'", expectedTime, actualTime) + } +} + +func TestStackDeserializeEnd_HandleDeserializeNoAttempt(t *testing.T) { + + sdk.NowTime = func() time.Time { + return time.Unix(1234, 0) + } + + ctx := context.TODO() + ctx = metrics.InitMetricContext(ctx, &metrics.SharedConnectionCounter{}, &testutils.NoopPublisher{}) + mw := GetRecordStackDeserializeEndMiddleware() + + data := metrics.Context(ctx).Data() + + _, _, _ = mw.HandleDeserialize(ctx, middleware.DeserializeInput{}, testutils.NoopDeserializeHandler{}) + + attempt, err := data.LatestAttempt() + + if attempt != nil { + t.Errorf("Attempt should be nil") + } + if err == nil { + t.Errorf("Err should not be nil") + } +} diff --git a/aws/middleware/private/metrics/middleware/stack_deserialize_start.go b/aws/middleware/private/metrics/middleware/stack_deserialize_start.go new file mode 100644 index 00000000000..c21e79df534 --- /dev/null +++ b/aws/middleware/private/metrics/middleware/stack_deserialize_start.go @@ -0,0 +1,44 @@ +// This package is designated as private and is intended for 
use only by the +// smithy client runtime. The exported API therein is not considered stable and +// is subject to breaking changes without notice. + +package middleware + +import ( + "context" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics" + "github.com/aws/aws-sdk-go-v2/internal/sdk" + "github.com/aws/smithy-go/middleware" +) + +type StackDeserializeStart struct{} + +func GetRecordStackDeserializeStartMiddleware() *StackDeserializeStart { + return &StackDeserializeStart{} +} + +func (m *StackDeserializeStart) ID() string { + return "StackDeserializeStart" +} + +func (m *StackDeserializeStart) HandleDeserialize( + ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler, +) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + + out, metadata, err = next.HandleDeserialize(ctx, in) + + mctx := metrics.Context(ctx) + + attemptMetrics, attemptErr := mctx.Data().LatestAttempt() + + if attemptErr != nil { + fmt.Println(err) + } else { + attemptMetrics.DeserializeStartTime = sdk.NowTime() + } + + return out, metadata, err +} diff --git a/aws/middleware/private/metrics/middleware/stack_deserialize_start_test.go b/aws/middleware/private/metrics/middleware/stack_deserialize_start_test.go new file mode 100644 index 00000000000..a2c5fabec0d --- /dev/null +++ b/aws/middleware/private/metrics/middleware/stack_deserialize_start_test.go @@ -0,0 +1,59 @@ +// This package is designated as private and is intended for use only by the +// smithy client runtime. The exported API therein is not considered stable and +// is subject to breaking changes without notice. + +package middleware + +import ( + "context" + "github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics" + "github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics/testutils" + "github.com/aws/aws-sdk-go-v2/internal/sdk" + "github.com/aws/smithy-go/middleware" + "testing" + "time" +) + +func TestStackDeserializeStart_HandleDeserializeSuccess(t *testing.T) { + + sdk.NowTime = func() time.Time { + return time.Unix(1234, 0) + } + + ctx := context.TODO() + ctx = metrics.InitMetricContext(ctx, &metrics.SharedConnectionCounter{}, &testutils.NoopPublisher{}) + mw := GetRecordStackDeserializeStartMiddleware() + + data := metrics.Context(ctx).Data() + + data.NewAttempt() + + _, _, _ = mw.HandleDeserialize(ctx, middleware.DeserializeInput{}, testutils.NoopDeserializeHandler{}) + + attempt, _ := data.LatestAttempt() + actualTime := attempt.DeserializeStartTime + expectedTime := sdk.NowTime() + if actualTime != expectedTime { + t.Errorf("Unexpected DeserializeStartTime, should be '%s' but was '%s'", expectedTime, actualTime) + } +} + +func TestStackDeserializeStart_HandleDeserializeNoAttempt(t *testing.T) { + + ctx := context.TODO() + ctx = metrics.InitMetricContext(ctx, &metrics.SharedConnectionCounter{}, &testutils.NoopPublisher{}) + mw := GetRecordStackDeserializeStartMiddleware() + + data := metrics.Context(ctx).Data() + + _, _, _ = mw.HandleDeserialize(ctx, middleware.DeserializeInput{}, testutils.NoopDeserializeHandler{}) + + attempt, err := data.LatestAttempt() + + if attempt != nil { + t.Errorf("Attempt should be nil") + } + if err == nil { + t.Errorf("Err should not be nil") + } +} diff --git a/aws/middleware/private/metrics/middleware/stack_serialize_end.go b/aws/middleware/private/metrics/middleware/stack_serialize_end.go new file mode 100644 index 00000000000..c742edcf240 --- /dev/null +++ 
b/aws/middleware/private/metrics/middleware/stack_serialize_end.go @@ -0,0 +1,33 @@ +package middleware + +import ( + "context" + "github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics" + "github.com/aws/aws-sdk-go-v2/internal/sdk" + "github.com/aws/smithy-go/middleware" +) + +type StackSerializeEnd struct{} + +func GetRecordStackSerializeEndMiddleware() *StackSerializeEnd { + return &StackSerializeEnd{} +} + +func (m *StackSerializeEnd) ID() string { + return "StackSerializeEnd" +} + +func (m *StackSerializeEnd) HandleSerialize( + ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, +) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + + mctx := metrics.Context(ctx) + mctx.Data().SerializeEndTime = sdk.NowTime() + + out, metadata, err = next.HandleSerialize(ctx, in) + + return out, metadata, err + +} diff --git a/aws/middleware/private/metrics/middleware/stack_serialize_end_test.go b/aws/middleware/private/metrics/middleware/stack_serialize_end_test.go new file mode 100644 index 00000000000..66e5c349f25 --- /dev/null +++ b/aws/middleware/private/metrics/middleware/stack_serialize_end_test.go @@ -0,0 +1,29 @@ +package middleware + +import ( + "context" + "github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics" + "github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics/testutils" + "github.com/aws/aws-sdk-go-v2/internal/sdk" + "github.com/aws/smithy-go/middleware" + "testing" + "time" +) + +func TestStartSerializeEnd_HandleSerialize(t *testing.T) { + + sdk.NowTime = func() time.Time { + return time.Unix(1234, 0) + } + + ctx := context.TODO() + ctx = metrics.InitMetricContext(ctx, &metrics.SharedConnectionCounter{}, &testutils.NoopPublisher{}) + mw := GetRecordStackSerializeEndMiddleware() + _, _, _ = mw.HandleSerialize(ctx, middleware.SerializeInput{}, testutils.NoopSerializeHandler{}) + + actualTime := metrics.Context(ctx).Data().SerializeEndTime + expectedTime := sdk.NowTime() + if actualTime != expectedTime { + t.Errorf("Unexpected SerializeEndTime, should be '%s' but was '%s'", expectedTime, actualTime) + } +} diff --git a/aws/middleware/private/metrics/middleware/stack_serialize_start.go b/aws/middleware/private/metrics/middleware/stack_serialize_start.go new file mode 100644 index 00000000000..996b32fcf6d --- /dev/null +++ b/aws/middleware/private/metrics/middleware/stack_serialize_start.go @@ -0,0 +1,32 @@ +package middleware + +import ( + "context" + "github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics" + "github.com/aws/aws-sdk-go-v2/internal/sdk" + "github.com/aws/smithy-go/middleware" +) + +type StackSerializeStart struct{} + +func GetRecordStackSerializeStartMiddleware() *StackSerializeStart { + return &StackSerializeStart{} +} + +func (m *StackSerializeStart) ID() string { + return "StackSerializeStart" +} + +func (m *StackSerializeStart) HandleSerialize( + ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, +) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + + mctx := metrics.Context(ctx) + mctx.Data().SerializeStartTime = sdk.NowTime() + + out, metadata, err = next.HandleSerialize(ctx, in) + + return out, metadata, err +} diff --git a/aws/middleware/private/metrics/middleware/stack_serialize_start_test.go b/aws/middleware/private/metrics/middleware/stack_serialize_start_test.go new file mode 100644 index 00000000000..c5e536b588e --- /dev/null +++ b/aws/middleware/private/metrics/middleware/stack_serialize_start_test.go @@ 
-0,0 +1,29 @@ +package middleware + +import ( + "context" + "github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics" + "github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics/testutils" + "github.com/aws/aws-sdk-go-v2/internal/sdk" + "github.com/aws/smithy-go/middleware" + "testing" + "time" +) + +func TestStartSerializeStart_HandleSerialize(t *testing.T) { + + sdk.NowTime = func() time.Time { + return time.Unix(1234, 0) + } + + ctx := context.TODO() + ctx = metrics.InitMetricContext(ctx, &metrics.SharedConnectionCounter{}, &testutils.NoopPublisher{}) + mw := GetRecordStackSerializeStartMiddleware() + _, _, _ = mw.HandleSerialize(ctx, middleware.SerializeInput{}, testutils.NoopSerializeHandler{}) + + actualTime := metrics.Context(ctx).Data().SerializeStartTime + expectedTime := sdk.NowTime() + if actualTime != expectedTime { + t.Errorf("Unexpected SerializeStartTime, should be '%s' but was '%s'", expectedTime, actualTime) + } +} diff --git a/aws/middleware/private/metrics/middleware/transport.go b/aws/middleware/private/metrics/middleware/transport.go new file mode 100644 index 00000000000..a43e1f9782f --- /dev/null +++ b/aws/middleware/private/metrics/middleware/transport.go @@ -0,0 +1,46 @@ +// This package is designated as private and is intended for use only by the +// smithy client runtime. The exported API therein is not considered stable and +// is subject to breaking changes without notice. + +package middleware + +import ( + "context" + "github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics" + "github.com/aws/aws-sdk-go-v2/internal/sdk" + "github.com/aws/smithy-go/middleware" +) + +type TransportMetrics struct{} + +func GetTransportMetricsMiddleware() *TransportMetrics { + return &TransportMetrics{} +} + +func (m *TransportMetrics) ID() string { + return "TransportMetrics" +} + +func (m *TransportMetrics) HandleDeserialize( + ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler, +) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, attemptErr error, +) { + + mctx := metrics.Context(ctx) + + if attempt, e := mctx.Data().LatestAttempt(); e == nil { + attempt.ServiceCallStart = sdk.NowTime() + mctx.ConnectionCounter().AddActiveRequest() + } + + out, metadata, err := next.HandleDeserialize(ctx, in) + + if attempt, e := mctx.Data().LatestAttempt(); e == nil { + attempt.ServiceCallEnd = sdk.NowTime() + mctx.ConnectionCounter().RemoveActiveRequest() + } + + return out, metadata, err + +} diff --git a/aws/middleware/private/metrics/middleware/transport_test.go b/aws/middleware/private/metrics/middleware/transport_test.go new file mode 100644 index 00000000000..258952751ed --- /dev/null +++ b/aws/middleware/private/metrics/middleware/transport_test.go @@ -0,0 +1,45 @@ +package middleware + +import ( + "context" + "github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics" + "github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics/testutils" + "github.com/aws/aws-sdk-go-v2/internal/sdk" + "github.com/aws/smithy-go/middleware" + "testing" + "time" +) + +func TestTransportMetrics_HandleSerialize(t *testing.T) { + + sdk.NowTime = func() time.Time { + return time.Unix(1234, 0) + } + + ctx := context.TODO() + ctx = metrics.InitMetricContext(ctx, &metrics.SharedConnectionCounter{}, &testutils.NoopPublisher{}) + + data := metrics.Context(ctx).Data() + + data.NewAttempt() + + mw := GetTransportMetricsMiddleware() + _, _, _ = mw.HandleDeserialize(ctx, middleware.DeserializeInput{}, testutils.NoopDeserializeHandler{}) + + 
attempt, _ := data.LatestAttempt() + + actualStartTime := attempt.ServiceCallStart + expectedStartTime := sdk.NowTime() + + if actualStartTime != expectedStartTime { + t.Errorf("Unexpected ServiceCallStart, should be '%s' but was '%s'", expectedStartTime, expectedStartTime) + } + + actualEndTime := attempt.ServiceCallEnd + expectedEndTime := sdk.NowTime() + + if actualEndTime != expectedEndTime { + t.Errorf("Unexpected ServiceCallEnd, should be '%s' but was '%s'", expectedEndTime, actualEndTime) + } + +} diff --git a/aws/middleware/private/metrics/middleware/wrap_data_stream.go b/aws/middleware/private/metrics/middleware/wrap_data_stream.go new file mode 100644 index 00000000000..7f7c5d54ebf --- /dev/null +++ b/aws/middleware/private/metrics/middleware/wrap_data_stream.go @@ -0,0 +1,59 @@ +package middleware + +import ( + "context" + "github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics" + "github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics/readcloserwithmetrics" + "github.com/aws/smithy-go/middleware" + "io" + "reflect" +) + +const ( + responseBodyFieldName = "Body" +) + +type WrapDataContext struct{} + +func GetWrapDataStreamMiddleware() *WrapDataContext { + return &WrapDataContext{} +} + +func (m *WrapDataContext) ID() string { + return "BuildWrapDataStream" +} + +func (m *WrapDataContext) HandleBuild( + ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, +) ( + out middleware.BuildOutput, metadata middleware.Metadata, err error, +) { + + out, metadata, err = next.HandleBuild(ctx, in) + + value := reflect.ValueOf(out.Result) + + if value.Kind() != reflect.Ptr { + return out, metadata, err + } + value = value.Elem() + + if value.Kind() != reflect.Struct { + return out, metadata, err + } + bodyField := value.FieldByName(responseBodyFieldName) + + if !(bodyField.IsValid() && bodyField.CanInterface()) { + return out, metadata, err + } + + body, ok := bodyField.Interface().(io.ReadCloser) + + if !ok { + return out, metadata, err + } + + bodyField.Set(reflect.ValueOf(readcloserwithmetrics.New(metrics.Context(ctx), body))) + + return out, metadata, err +} diff --git a/aws/middleware/private/metrics/middleware/wrap_data_streams_test.go b/aws/middleware/private/metrics/middleware/wrap_data_streams_test.go new file mode 100644 index 00000000000..1c5c477f243 --- /dev/null +++ b/aws/middleware/private/metrics/middleware/wrap_data_streams_test.go @@ -0,0 +1,114 @@ +package middleware + +import ( + "context" + "io" + "reflect" + "testing" + + "github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics" + "github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics/testutils" + "github.com/aws/smithy-go/middleware" +) + +type TestResult struct { + Body io.ReadCloser +} + +func TestWrapDataStream_HandleBuild(t *testing.T) { + + cases := map[string]struct { + ConnectionCounter *metrics.SharedConnectionCounter + Publisher testutils.MetricDataRecorderPublisher + Result *TestResult + Input middleware.BuildInput + ExpectedStreamData string + ExpectedMetricData metrics.StreamMetrics + }{ + "success": { + ConnectionCounter: &metrics.SharedConnectionCounter{}, + Publisher: testutils.MetricDataRecorderPublisher{}, + Input: middleware.BuildInput{}, + Result: &TestResult{ + Body: &testutils.TestReadCloser{Data: []byte("testString")}, + }, + ExpectedStreamData: "testString", + ExpectedMetricData: metrics.StreamMetrics{ + ReadDuration: 0, + ReadBytes: 10, + }, + }, + "emptyData": { + ConnectionCounter: &metrics.SharedConnectionCounter{}, + Publisher: 
testutils.MetricDataRecorderPublisher{}, + Input: middleware.BuildInput{}, + Result: &TestResult{ + Body: &testutils.TestReadCloser{Data: []byte("")}, + }, + ExpectedStreamData: "", + ExpectedMetricData: metrics.StreamMetrics{ + ReadDuration: 0, + ReadBytes: 0, + }, + }, + "nilBody": { + ConnectionCounter: &metrics.SharedConnectionCounter{}, + Publisher: testutils.MetricDataRecorderPublisher{}, + Input: middleware.BuildInput{}, + Result: &TestResult{ + Body: nil, + }, + }, + "nilResult": { + ConnectionCounter: &metrics.SharedConnectionCounter{}, + Publisher: testutils.MetricDataRecorderPublisher{}, + Input: middleware.BuildInput{}, + Result: nil, + }, + } + + for name, c := range cases { + t.Run(name, func(t *testing.T) { + + ctx := context.TODO() + ctx = metrics.InitMetricContext(ctx, c.ConnectionCounter, &c.Publisher) + mw := GetWrapDataStreamMiddleware() + + out, _, _ := mw.HandleBuild(ctx, c.Input, &testutils.StreamingBodyBuildHandler{Result: c.Result}) + + result := out.Result.(*TestResult) + + if result == nil || result.Body == nil { + return + } + + readData, _ := io.ReadAll(result.Body) + actualStreamData := string(readData) + actualMetricData := c.Publisher.Data.Stream + + if actualStreamData != c.ExpectedStreamData { + t.Errorf("Unexpected Data, should be '%s' but was '%s'", c.ExpectedStreamData, actualStreamData) + } + if !reflect.DeepEqual(actualMetricData, c.ExpectedMetricData) { + t.Errorf("Unexpected Metric Data, should be '%+v' but was '%+v'", c.ExpectedMetricData, actualMetricData) + } + + }) + } + +} + +func TestWrapDataStream_WrongResultType(t *testing.T) { + pub := &testutils.NoopPublisher{} + ctx := context.TODO() + ctx = metrics.InitMetricContext(ctx, &metrics.SharedConnectionCounter{}, pub) + mw := GetWrapDataStreamMiddleware() + + r1 := TestResult{ + Body: &testutils.TestReadCloser{Data: []byte("testString")}, + } + + _, _, _ = mw.HandleBuild(ctx, middleware.BuildInput{}, &testutils.StreamingBodyBuildHandler{Result: r1}) + _, _, _ = mw.HandleBuild(ctx, middleware.BuildInput{}, &testutils.StreamingBodyBuildHandler{Result: pub}) + +} diff --git a/aws/middleware/private/metrics/publisher/emf.go b/aws/middleware/private/metrics/publisher/emf.go new file mode 100644 index 00000000000..d5a0a868595 --- /dev/null +++ b/aws/middleware/private/metrics/publisher/emf.go @@ -0,0 +1,155 @@ +package publisher + +import ( + "fmt" + "strconv" + + "github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics" + "github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics/emf" +) + +// EMFPublisher is a MetricPublisher implementation that publishes metrics to stdout using EMF format. +type EMFPublisher struct { + namespace string + serializer metrics.Serializer + additionalDimensions map[string]string +} + +var output = func(format string, a ...interface{}) { + fmt.Printf(format, a...) +} + +// NewEMFPublisher creates a new EMFPublisher with the specified namespace and serializer. 
+func NewEMFPublisher(namespace string, serializer metrics.Serializer) *EMFPublisher {
+	return &EMFPublisher{
+		namespace:            namespace,
+		serializer:           serializer,
+		additionalDimensions: map[string]string{},
+	}
+}
+
+// SetAdditionalDimension adds or replaces a dimension that is attached to
+// every published log entry.
+func (p *EMFPublisher) SetAdditionalDimension(key string, value string) {
+	p.additionalDimensions[key] = value
+}
+
+// RemoveAdditionalDimension removes the additional dimension with the given key.
+func (p *EMFPublisher) RemoveAdditionalDimension(key string) {
+	delete(p.additionalDimensions, key)
+}
+
+func (p *EMFPublisher) populateWithAdditionalDimensions(entry *emf.Entry) {
+	for k := range p.additionalDimensions {
+		entry.AddDimension(k, p.additionalDimensions[k])
+	}
+}
+
+// perRequestMetrics generates and returns the log entry for per-request metrics.
+func (p *EMFPublisher) perRequestMetrics(data *metrics.MetricData) (string, error) {
+
+	entry := emf.NewEntry(p.namespace, p.serializer)
+
+	p.populateWithAdditionalDimensions(&entry)
+
+	entry.AddDimension(metrics.ServiceIDKey, data.ServiceID)
+	entry.AddDimension(metrics.OperationNameKey, data.OperationName)
+	entry.AddDimension(metrics.HTTPStatusCodeKey, strconv.Itoa(data.StatusCode))
+
+	entry.AddProperty(metrics.ClientRequestIDKey, data.ClientRequestID)
+
+	entry.AddMetric(metrics.APICallDurationKey, float64(data.APICallDuration.Nanoseconds()))
+	entry.AddMetric(metrics.APICallSuccessfulKey, float64(data.Success))
+	entry.AddMetric(metrics.MarshallingDurationKey, float64(data.MarshallingDuration.Nanoseconds()))
+	entry.AddMetric(metrics.EndpointResolutionDurationKey, float64(data.EndpointResolutionDuration.Nanoseconds()))
+
+	entry.AddMetric(metrics.RetryCountKey, float64(data.RetryCount))
+
+	// We only publish throughput if it differs from 0, to avoid polluting statistics
+	if data.InThroughput != 0 {
+		entry.AddMetric(metrics.InThroughputKey, data.InThroughput)
+	}
+	if data.OutThroughput != 0 {
+		entry.AddMetric(metrics.OutThroughputKey, data.OutThroughput)
+	}
+
+	return entry.Build()
+}
+
+// perAttemptMetrics generates and returns the log entry for per-attempt metrics.
+func (p *EMFPublisher) perAttemptMetrics(data *metrics.MetricData, attemptIndex int) (string, error) { + + attempt := data.Attempts[attemptIndex] + + entry := emf.NewEntry(p.namespace, p.serializer) + + p.populateWithAdditionalDimensions(&entry) + + entry.AddDimension(metrics.ServiceIDKey, data.ServiceID) + entry.AddDimension(metrics.OperationNameKey, data.OperationName) + entry.AddDimension(metrics.HTTPStatusCodeKey, strconv.Itoa(attempt.StatusCode)) + + entry.AddProperty(metrics.ClientRequestIDKey, data.ClientRequestID) + entry.AddProperty(metrics.AWSExtendedRequestIDKey, attempt.ExtendedRequestID) + entry.AddProperty(metrics.AWSRequestIDKey, attempt.RequestID) + entry.AddProperty(metrics.AttemptNumberKey, attemptIndex) + + entry.AddMetric(metrics.MaxConcurrencyKey, float64(attempt.MaxConcurrency)) + entry.AddMetric(metrics.AvailableConcurrencyKey, float64(attempt.AvailableConcurrency)) + entry.AddMetric(metrics.ConcurrencyAcquireDurationKey, float64(attempt.ConcurrencyAcquireDuration.Nanoseconds())) + entry.AddMetric(metrics.PendingConcurrencyAcquiresKey, float64(attempt.PendingConnectionAcquires)) + entry.AddMetric(metrics.SigningDurationKey, float64(attempt.SigningDuration.Nanoseconds())) + entry.AddMetric(metrics.UnmarshallingDurationKey, float64(attempt.UnMarshallingDuration.Nanoseconds())) + entry.AddMetric(metrics.TimeToFirstByteKey, float64(attempt.TimeToFirstByte.Nanoseconds())) + entry.AddMetric(metrics.ServiceCallDurationKey, float64(attempt.ServiceCallDuration.Nanoseconds())) + entry.AddMetric(metrics.BackoffDelayDurationKey, float64(attempt.RetryDelay)) + + return entry.Build() +} + +// perStreamMetrics generates and returns the log entry for per-stream metrics. +func (p *EMFPublisher) perStreamMetrics(data *metrics.MetricData) (string, error) { + + entry := emf.NewEntry(p.namespace, p.serializer) + + p.populateWithAdditionalDimensions(&entry) + + entry.AddDimension(metrics.ServiceIDKey, data.ServiceID) + entry.AddDimension(metrics.OperationNameKey, data.OperationName) + entry.AddDimension(metrics.HTTPStatusCodeKey, strconv.Itoa(data.StatusCode)) + + entry.AddProperty(metrics.ClientRequestIDKey, data.ClientRequestID) + + if data.Stream.Throughput > 0 { + entry.AddMetric(metrics.StreamThroughputKey, data.Stream.Throughput) + } + + return entry.Build() +} + +// PostRequestMetrics publishes the request metrics to stdout using EMF format. +func (p *EMFPublisher) PostRequestMetrics(data *metrics.MetricData) error { + requestMetricLogEntry, err := p.perRequestMetrics(data) + if err != nil { + output("error generating log entry for request metrics due to %s", err.Error()) + } else { + output("%s\n", requestMetricLogEntry) + } + for idx := range data.Attempts { + attemptMetricLogEntry, err := p.perAttemptMetrics(data, idx) + if err != nil { + output("error generating log entry for attempt metrics due to %s", err.Error()) + } else { + output("%s\n", attemptMetricLogEntry) + } + } + return nil +} + +// PostStreamMetrics publishes the stream metrics to stdout using EMF format. 
+func (p *EMFPublisher) PostStreamMetrics(data *metrics.MetricData) error { + streamMetrics, err := p.perStreamMetrics(data) + if err != nil { + output("error generating log entry for stream metrics due to %s", err.Error()) + } else { + output("%s\n", streamMetrics) + } + return nil +} diff --git a/aws/middleware/private/metrics/publisher/emf_test.go b/aws/middleware/private/metrics/publisher/emf_test.go new file mode 100644 index 00000000000..36fb1f08c9f --- /dev/null +++ b/aws/middleware/private/metrics/publisher/emf_test.go @@ -0,0 +1,255 @@ +// This package is designated as private and is intended for use only by the +// AWS client runtime. The exported API therein is not considered stable and +// is subject to breaking changes without notice. + +package publisher + +import ( + "fmt" + "reflect" + "testing" + "time" + + "github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics" + "github.com/aws/aws-sdk-go-v2/internal/sdk" +) + +type TestSerializerWithError struct{} + +func (TestSerializerWithError) Serialize(obj interface{}) (string, error) { + return "", fmt.Errorf("serialization error") +} + +func TestPostRequestMetrics(t *testing.T) { + + sdk.NowTime = func() time.Time { + return time.Unix(1234, 0) + } + + cases := map[string]struct { + Namespace string + Serializer metrics.Serializer + Data metrics.MetricData + ExpectedError error + ExpectedResults []string + }{ + "emptyRequestMetricData": { + Namespace: "testNamespace", + Serializer: metrics.DefaultSerializer{}, + Data: metrics.MetricData{}, + ExpectedError: nil, + ExpectedResults: []string{ + emptyRequestMetricData, + }, + }, + "serializerError": { + Namespace: "testNamespace", + Serializer: TestSerializerWithError{}, + Data: metrics.MetricData{ + Attempts: []metrics.AttemptMetrics{{}}, + }, + ExpectedError: nil, + ExpectedResults: []string{ + "error generating log entry for request metrics due to [serialization error]", + "error generating log entry for attempt metrics due to [serialization error]", + }, + }, + "completeRequestMetricData": { + Namespace: "testNamespace", + Serializer: metrics.DefaultSerializer{}, + Data: metrics.MetricData{ + RequestStartTime: time.Unix(1234, 0), + RequestEndTime: time.Unix(1434, 0), + SerializeStartTime: time.Unix(1234, 0), + SerializeEndTime: time.Unix(1434, 0), + ResolveEndpointStartTime: time.Unix(1234, 0), + ResolveEndpointEndTime: time.Unix(1434, 0), + Success: 1, + ClientRequestID: "crid", + ServiceID: "sid", + OperationName: "operationname", + PartitionID: "partitionid", + Region: "region", + RequestContentLength: 100, + Stream: metrics.StreamMetrics{}, + Attempts: []metrics.AttemptMetrics{{ + ServiceCallStart: time.Unix(1234, 0), + ServiceCallEnd: time.Unix(1434, 0), + FirstByteTime: time.Unix(1234, 0), + ConnRequestedTime: time.Unix(1234, 0), + ConnObtainedTime: time.Unix(1434, 0), + CredentialFetchStartTime: time.Unix(1234, 0), + CredentialFetchEndTime: time.Unix(1434, 0), + SignStartTime: time.Unix(1234, 0), + SignEndTime: time.Unix(1434, 0), + DeserializeStartTime: time.Unix(1234, 0), + DeserializeEndTime: time.Unix(1434, 0), + RetryDelay: 100, + ResponseContentLength: 100, + StatusCode: 200, + RequestID: "reqid", + ExtendedRequestID: "exreqid", + HTTPClient: "Default", + MaxConcurrency: 10, + PendingConnectionAcquires: 1, + AvailableConcurrency: 2, + ActiveRequests: 3, + ReusedConnection: false, + }, + { + ServiceCallStart: time.Unix(1234, 0), + ServiceCallEnd: time.Unix(1434, 0), + FirstByteTime: time.Unix(1234, 0), + ConnRequestedTime: time.Unix(1234, 0), + ConnObtainedTime: 
time.Unix(1434, 0), + CredentialFetchStartTime: time.Unix(1234, 0), + CredentialFetchEndTime: time.Unix(1434, 0), + SignStartTime: time.Unix(1234, 0), + SignEndTime: time.Unix(1434, 0), + DeserializeStartTime: time.Unix(1234, 0), + DeserializeEndTime: time.Unix(1434, 0), + RetryDelay: 100, + ResponseContentLength: 100, + StatusCode: 200, + RequestID: "reqid", + ExtendedRequestID: "exreqid", + HTTPClient: "Default", + MaxConcurrency: 10, + PendingConnectionAcquires: 1, + AvailableConcurrency: 2, + ActiveRequests: 3, + ReusedConnection: false, + }}, + }, + ExpectedError: nil, + ExpectedResults: []string{ + completeRequestMetricData, + completeMetricDataAttempt1, + completeMetricDataAttempt2, + }, + }, + } + + for name, c := range cases { + t.Run(name, func(t *testing.T) { + + var actualResults []string + + output = func(format string, a ...interface{}) { + actualResults = append(actualResults, fmt.Sprintf(format, a)) + } + + publisher := NewEMFPublisher(c.Namespace, c.Serializer) + + c.Data.ComputeRequestMetrics() + + err := publisher.PostRequestMetrics(&c.Data) + + if !reflect.DeepEqual(err, c.ExpectedError) { + t.Errorf("Unexpected error, should be '%s' but was '%s'", c.ExpectedError, err) + } + + if len(c.ExpectedResults) != len(actualResults) { + t.Errorf("Different number of results. Expected %d but got %d", len(c.ExpectedResults), len(actualResults)) + } + + for i := range c.ExpectedResults { + if !reflect.DeepEqual(actualResults[i], c.ExpectedResults[i]) { + t.Errorf("Unexpected result, should be '%s' but was '%s'", c.ExpectedResults[i], actualResults[i]) + } + } + }) + } +} + +func TestPostStreamMetrics(t *testing.T) { + + sdk.NowTime = func() time.Time { + return time.Unix(1234, 0) + } + + cases := map[string]struct { + Namespace string + Serializer metrics.Serializer + Data metrics.MetricData + ExpectedError error + ExpectedResults []string + }{ + "emptyStreamMetricData": { + Namespace: "testNamespace", + Serializer: metrics.DefaultSerializer{}, + Data: metrics.MetricData{}, + ExpectedError: nil, + ExpectedResults: []string{ + emptyStreamMetricData, + }, + }, + "serializerError": { + Namespace: "testNamespace", + Serializer: TestSerializerWithError{}, + Data: metrics.MetricData{}, + ExpectedError: nil, + ExpectedResults: []string{ + "error generating log entry for stream metrics due to [serialization error]", + }, + }, + "completeStreamMetricData": { + Namespace: "testNamespace", + Serializer: metrics.DefaultSerializer{}, + Data: metrics.MetricData{ + RequestStartTime: time.Unix(1234, 0), + RequestEndTime: time.Unix(1434, 0), + SerializeStartTime: time.Unix(1234, 0), + SerializeEndTime: time.Unix(1434, 0), + ResolveEndpointStartTime: time.Unix(1234, 0), + ResolveEndpointEndTime: time.Unix(1434, 0), + Success: 1, + StatusCode: 200, + ClientRequestID: "crid", + ServiceID: "sid", + OperationName: "operationname", + PartitionID: "partitionid", + Region: "region", + RequestContentLength: 100, + Stream: metrics.StreamMetrics{ + ReadDuration: 150, + ReadBytes: 12, + Throughput: 80000000, + }, + }, + ExpectedError: nil, + ExpectedResults: []string{ + completeStreamMetricData, + }, + }, + } + + for name, c := range cases { + t.Run(name, func(t *testing.T) { + + var actualResults []string + + output = func(format string, a ...interface{}) { + actualResults = append(actualResults, fmt.Sprintf(format, a)) + } + + publisher := NewEMFPublisher(c.Namespace, c.Serializer) + + err := publisher.PostStreamMetrics(&c.Data) + + if !reflect.DeepEqual(err, c.ExpectedError) { + t.Errorf("Unexpected error, 
should be '%s' but was '%s'", c.ExpectedError, err) + } + + if len(c.ExpectedResults) != len(actualResults) { + t.Errorf("Different number of results. Expected %d but got %d", len(c.ExpectedResults), len(actualResults)) + } + + for i := range c.ExpectedResults { + if !reflect.DeepEqual(actualResults[i], c.ExpectedResults[i]) { + t.Errorf("Unexpected result, should be '%s' but was '%s'", c.ExpectedResults[i], actualResults[i]) + } + } + }) + } +} diff --git a/aws/middleware/private/metrics/publisher/emf_test_data.go b/aws/middleware/private/metrics/publisher/emf_test_data.go new file mode 100644 index 00000000000..73ecd786a3d --- /dev/null +++ b/aws/middleware/private/metrics/publisher/emf_test_data.go @@ -0,0 +1,227 @@ +// This package is designated as private and is intended for use only by the +// AWS client runtime. The exported API therein is not considered stable and +// is subject to breaking changes without notice. + +package publisher + +import "strings" + +func stripString(str string) string { + str = strings.Replace(str, " ", "", -1) + str = strings.Replace(str, "\t", "", -1) + str = strings.Replace(str, "\n", "", -1) + return str + "\n" +} + +var emptyRequestMetricData = stripString(` +[{ + "ApiCallDuration": 0, + "ApiCallSuccessful": 0, + "ClientRequestId": "", + "EndpointResolutionDuration": 0, + "HttpStatusCode":"0", + "MarshallingDuration": 0, + "OperationName": "", + "RetryCount": -1, + "ServiceId": "", + "_aws": { + "CloudWatchMetrics": [{ + "Dimensions": [ + ["ServiceId", "OperationName", "HttpStatusCode"] + ], + "Metrics": [{ + "Name": "ApiCallDuration" + }, { + "Name": "ApiCallSuccessful" + }, { + "Name": "MarshallingDuration" + }, { + "Name": "EndpointResolutionDuration" + }, { + "Name": "RetryCount" + }], + "Namespace": "testNamespace" + }], + "Timestamp": 1234000 + } +}] +`) + +var emptyStreamMetricData = stripString(` +[{ + "ClientRequestId": "", + "HttpStatusCode": "0", + "OperationName": "", + "ServiceId": "", + "_aws": { + "CloudWatchMetrics": [{ + "Dimensions": [ + ["ServiceId", "OperationName", "HttpStatusCode"] + ], + "Metrics": [], + "Namespace": "testNamespace" + }], + "Timestamp": 1234000 + } +}] +`) + +var completeStreamMetricData = stripString(` +[{ + "ClientRequestId": "crid", + "HttpStatusCode": "200", + "OperationName": "operationname", + "ServiceId": "sid", + "Throughput": 80000000, + "_aws": { + "CloudWatchMetrics": [{ + "Dimensions": [ + ["ServiceId", "OperationName", "HttpStatusCode"] + ], + "Metrics": [{ + "Name": "Throughput" + }], + "Namespace": "testNamespace" + }], + "Timestamp": 1234000 + } +}] +`) + +var completeRequestMetricData = stripString(` +[{ + "ApiCallDuration": 200000000000, + "ApiCallSuccessful": 1, + "ClientRequestId": "crid", + "EndpointResolutionDuration": 200000000000, + "HttpStatusCode":"200", + "InThroughput": 0.5, + "MarshallingDuration": 200000000000, + "OperationName": "operationname", + "OutThroughput": 0.5, + "RetryCount": 1, + "ServiceId": "sid", + "_aws": { + "CloudWatchMetrics": [{ + "Dimensions": [ + ["ServiceId", "OperationName", "HttpStatusCode"] + ], + "Metrics": [{ + "Name": "ApiCallDuration" + }, { + "Name": "ApiCallSuccessful" + }, { + "Name": "MarshallingDuration" + }, { + "Name": "EndpointResolutionDuration" + }, { + "Name": "RetryCount" + }, { + "Name": "InThroughput" + }, { + "Name": "OutThroughput" + }], + "Namespace": "testNamespace" + }], + "Timestamp": 1234000 + } +}] +`) + +var completeMetricDataAttempt1 = stripString(` +[{ + "AttemptNumber": 0, + "AvailableConcurrency": 2, + "AwsExtendedRequestId": 
"exreqid", + "AwsRequestId": "reqid", + "BackoffDelayDuration":100, + "ClientRequestId": "crid", + "ConcurrencyAcquireDuration": 200000000000, + "HttpStatusCode": "200", + "MaxConcurrency":10, + "OperationName": "operationname", + "PendingConcurrencyAcquires": 1, + "ServiceCallDuration": 200000000000, + "ServiceId": "sid", + "SigningDuration": 200000000000, + "TimeToFirstByte": 0, + "UnmarshallingDuration": 200000000000, + "_aws": { + "CloudWatchMetrics": [{ + "Dimensions": [ + ["ServiceId", "OperationName", "HttpStatusCode"] + ], + "Metrics": [{ + "Name": "MaxConcurrency" + }, { + "Name": "AvailableConcurrency" + }, { + "Name": "ConcurrencyAcquireDuration" + }, { + "Name": "PendingConcurrencyAcquires" + }, { + "Name": "SigningDuration" + }, { + "Name": "UnmarshallingDuration" + }, { + "Name": "TimeToFirstByte" + }, { + "Name": "ServiceCallDuration" + }, { + "Name":"BackoffDelayDuration" + }], + "Namespace": "testNamespace" + }], + "Timestamp": 1234000 + } +}] +`) + +var completeMetricDataAttempt2 = stripString(` +[{ + "AttemptNumber": 1, + "AvailableConcurrency": 2, + "AwsExtendedRequestId": "exreqid", + "AwsRequestId": "reqid", + "BackoffDelayDuration":100, + "ClientRequestId": "crid", + "ConcurrencyAcquireDuration": 200000000000, + "HttpStatusCode": "200", + "MaxConcurrency":10, + "OperationName": "operationname", + "PendingConcurrencyAcquires": 1, + "ServiceCallDuration": 200000000000, + "ServiceId": "sid", + "SigningDuration": 200000000000, + "TimeToFirstByte": 0, + "UnmarshallingDuration": 200000000000, + "_aws": { + "CloudWatchMetrics": [{ + "Dimensions": [ + ["ServiceId", "OperationName", "HttpStatusCode"] + ], + "Metrics": [{ + "Name": "MaxConcurrency" + }, { + "Name": "AvailableConcurrency" + }, { + "Name": "ConcurrencyAcquireDuration" + }, { + "Name": "PendingConcurrencyAcquires" + }, { + "Name": "SigningDuration" + }, { + "Name": "UnmarshallingDuration" + }, { + "Name": "TimeToFirstByte" + }, { + "Name": "ServiceCallDuration" + }, { + "Name": "BackoffDelayDuration" + }], + "Namespace": "testNamespace" + }], + "Timestamp": 1234000 + } +}] +`) diff --git a/aws/middleware/private/metrics/readcloserwithmetrics/read_closer_with_metrics.go b/aws/middleware/private/metrics/readcloserwithmetrics/read_closer_with_metrics.go new file mode 100644 index 00000000000..eb234ffa762 --- /dev/null +++ b/aws/middleware/private/metrics/readcloserwithmetrics/read_closer_with_metrics.go @@ -0,0 +1,56 @@ +package readcloserwithmetrics + +import ( + "fmt" + "github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics" + "github.com/aws/aws-sdk-go-v2/internal/sdk" + "io" +) + +type ReadCloserWithMetrics struct { + data *metrics.MetricData + publisher metrics.MetricPublisher + readCloser io.ReadCloser + readFinished bool +} + +func New( + context *metrics.MetricContext, closer io.ReadCloser, +) (trc *ReadCloserWithMetrics) { + return &ReadCloserWithMetrics{ + data: context.Data(), + publisher: context.Publisher(), + readCloser: closer, + readFinished: false, + } +} + +func (r *ReadCloserWithMetrics) Read(p []byte) (n int, err error) { + readRoundStarted := sdk.NowTime() + read, err := r.readCloser.Read(p) + readRoundEnd := sdk.NowTime() + r.data.Stream.ReadDuration += readRoundEnd.Sub(readRoundStarted) + r.data.Stream.ReadBytes += int64(read) + if err == io.EOF { + r.readFinished = true + r.finalize() + } + return read, err +} + +func (r *ReadCloserWithMetrics) Close() error { + if !r.readFinished { + r.finalize() + } + return r.readCloser.Close() +} + +func (r *ReadCloserWithMetrics) finalize() { + if 
r.data.Stream.ReadDuration > 0 { + r.data.Stream.Throughput = float64(r.data.Stream.ReadBytes) / r.data.Stream.ReadDuration.Seconds() + } + err := r.publisher.PostStreamMetrics(r.data) + if err != nil { + fmt.Println("Failed to post stream metrics") + } +}
diff --git a/aws/middleware/private/metrics/readcloserwithmetrics/read_closer_with_metrics_test.go b/aws/middleware/private/metrics/readcloserwithmetrics/read_closer_with_metrics_test.go new file mode 100644 index 00000000000..97d21b1ecd0 --- /dev/null +++ b/aws/middleware/private/metrics/readcloserwithmetrics/read_closer_with_metrics_test.go @@ -0,0 +1,83 @@ +package readcloserwithmetrics + +import ( + "context" + "github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics" + "github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics/testutils" + "io" + "testing" +) + +func TestNew(t *testing.T) { + + ctx := context.TODO() + ctx = metrics.InitMetricContext(ctx, &metrics.SharedConnectionCounter{}, &testutils.NoopPublisher{}) + mctx := metrics.Context(ctx) + + expectedData := "testString" + trc := testutils.TestReadCloser{Data: []byte(expectedData)} + + reader := New(mctx, &trc) + readData, _ := io.ReadAll(reader) + actualData := string(readData) + + if actualData != expectedData { + t.Errorf("Unexpected Data, should be '%s' but was '%s'", expectedData, actualData) + } +} + +func TestReadCloserWithMetrics_Close(t *testing.T) { + + mdrp := &testutils.MetricDataRecorderPublisher{} + ctx := context.TODO() + ctx = metrics.InitMetricContext(ctx, &metrics.SharedConnectionCounter{}, mdrp) + mctx := metrics.Context(ctx) + + expectedData := "testString" + trc := testutils.TestReadCloser{Data: []byte(expectedData)} + + reader := New(mctx, &trc) + + err := reader.Close() + + if err != nil { + t.Errorf("Unexpected Error in Close") + } + + if mdrp.Data == nil { + t.Errorf("Data should be set in publisher") + } + + rb := mdrp.Data.Stream.ReadBytes + if mdrp.Data.Stream.ReadBytes != 0 { + t.Errorf("Unexpected ReadBytes, should be '%d' but was '%d'", 0, rb) + } + +} + +func TestReadCloserWithMetrics_Read(t *testing.T) { + mdrp := &testutils.MetricDataRecorderPublisher{} + ctx := context.TODO() + ctx = metrics.InitMetricContext(ctx, &metrics.SharedConnectionCounter{}, mdrp) + mctx := metrics.Context(ctx) + + expectedData := "testString" + trc := testutils.TestReadCloser{Data: []byte(expectedData)} + + reader := New(mctx, &trc) + + readData, err := io.ReadAll(reader) + + if err != nil { + t.Errorf("Unexpected Error in Read") + } + + if string(readData) != expectedData { + t.Errorf("Unexpected Data, should be '%s' but was '%s'", expectedData, string(readData)) + } + + if mdrp.Data == nil { + t.Errorf("Data should be set in publisher") + } + + rb := mdrp.Data.Stream.ReadBytes + if rb != int64(len(expectedData)) { + t.Errorf("Unexpected ReadBytes, should be '%d' but was '%d'", len(expectedData), rb) + } +}
diff --git a/aws/middleware/private/metrics/testutils/test_util.go b/aws/middleware/private/metrics/testutils/test_util.go new file mode 100644 index 00000000000..f19c70298bb --- /dev/null +++ b/aws/middleware/private/metrics/testutils/test_util.go @@ -0,0 +1,107 @@ +package testutils + +import ( + "context" + "fmt" + "io" + + "github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics" + "github.com/aws/smithy-go/middleware" +) + +type MetricDataRecorderPublisher struct { + Data *metrics.MetricData +} + +func (mdrp *MetricDataRecorderPublisher) PostRequestMetrics(data *metrics.MetricData) error { + mdrp.Data = data + return nil +} + +func (mdrp *MetricDataRecorderPublisher) PostStreamMetrics(data *metrics.MetricData) error { + mdrp.Data = data + return nil +} + +type NoopPublisher struct{} + +func (np
*NoopPublisher) PostRequestMetrics(data *metrics.MetricData) error { + return nil +} + +func (np *NoopPublisher) PostStreamMetrics(data *metrics.MetricData) error { + return nil +} + +type ErrorPublisher struct{} + +func (tp *ErrorPublisher) PostRequestMetrics(data *metrics.MetricData) error { + return fmt.Errorf("publisher error") +} + +func (tp *ErrorPublisher) PostStreamMetrics(data *metrics.MetricData) error { + return fmt.Errorf("publisher error") +} + +type NoopInitializeHandler struct{} +type ErrorInitializeHandler struct{} +type NoopSerializeHandler struct{} +type NoopFinalizeHandler struct{} +type NoopDeserializeHandler struct{} +type StreamingBodyBuildHandler struct { + Result interface{} +} + +func (NoopInitializeHandler) HandleInitialize(ctx context.Context, in middleware.InitializeInput) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + return middleware.InitializeOutput{}, middleware.Metadata{}, nil +} + +func (ErrorInitializeHandler) HandleInitialize(ctx context.Context, in middleware.InitializeInput) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + return middleware.InitializeOutput{}, middleware.Metadata{}, fmt.Errorf("init error") +} + +func (NoopFinalizeHandler) HandleFinalize(ctx context.Context, in middleware.FinalizeInput) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + return middleware.FinalizeOutput{}, middleware.Metadata{}, nil +} + +func (NoopDeserializeHandler) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + return middleware.DeserializeOutput{}, middleware.Metadata{}, nil +} + +func (NoopSerializeHandler) HandleSerialize(ctx context.Context, in middleware.SerializeInput) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + return middleware.SerializeOutput{}, middleware.Metadata{}, nil +} + +func (s *StreamingBodyBuildHandler) HandleBuild(ctx context.Context, in middleware.BuildInput) ( + out middleware.BuildOutput, metadata middleware.Metadata, err error, +) { + return middleware.BuildOutput{Result: s.Result}, middleware.Metadata{}, nil +} + +type TestReadCloser struct { + Data []byte + offset int +} + +func (m *TestReadCloser) Read(p []byte) (int, error) { + if m.offset >= len(m.Data) { + return 0, io.EOF + } + n := copy(p, m.Data[m.offset:]) + m.offset += n + return n, nil +} + +func (m *TestReadCloser) Close() error { + return nil +} diff --git a/aws/retry/middleware.go b/aws/retry/middleware.go index 822fc920a75..722ca34c6a0 100644 --- a/aws/retry/middleware.go +++ b/aws/retry/middleware.go @@ -3,6 +3,7 @@ package retry import ( "context" "fmt" + "github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics" "strconv" "strings" "time" @@ -225,6 +226,13 @@ func (r *Attempt) handleAttempt( // that time. Potentially early exist if the sleep is canceled via the // context. 
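+	// If a private metrics context is attached to ctx (see
+	// aws/middleware/private/metrics), the block below records the computed
+	// backoff delay on the latest attempt's metric data before it is applied.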
retryDelay, reqErr := r.retryer.RetryDelay(attemptNum, err) + mctx := metrics.Context(ctx) + if mctx != nil { + attempt, err := mctx.Data().LatestAttempt() + if err == nil { + attempt.RetryDelay = retryDelay + } + } if reqErr != nil { return out, attemptResult, releaseRetryToken, reqErr }
diff --git a/aws/signer/v4/middleware.go b/aws/signer/v4/middleware.go index f682fb5dce0..f39a369ad84 100644 --- a/aws/signer/v4/middleware.go +++ b/aws/signer/v4/middleware.go @@ -11,6 +11,7 @@ import ( "github.com/aws/aws-sdk-go-v2/aws" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics" v4Internal "github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4" internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" "github.com/aws/aws-sdk-go-v2/internal/sdk" @@ -300,7 +301,22 @@ func (s *SignHTTPRequestMiddleware) HandleFinalize(ctx context.Context, in middl return out, metadata, &SigningError{Err: fmt.Errorf("computed payload hash missing from context")} } + mctx := metrics.Context(ctx) + + if mctx != nil { + if attempt, err := mctx.Data().LatestAttempt(); err == nil { + attempt.CredentialFetchStartTime = sdk.NowTime() + } + } + credentials, err := s.credentialsProvider.Retrieve(ctx) + + if mctx != nil { + if attempt, err := mctx.Data().LatestAttempt(); err == nil { + attempt.CredentialFetchEndTime = sdk.NowTime() + } + } + if err != nil { return out, metadata, &SigningError{Err: fmt.Errorf("failed to retrieve credentials: %w", err)} } @@ -321,7 +337,20 @@ func (s *SignHTTPRequestMiddleware) HandleFinalize(ctx context.Context, in middl }) } + if mctx != nil { + if attempt, err := mctx.Data().LatestAttempt(); err == nil { + attempt.SignStartTime = sdk.NowTime() + } + } + err = s.signer.SignHTTP(ctx, credentials, req.Request, payloadHash, signingName, signingRegion, sdk.NowTime(), signerOptions...) + + if mctx != nil { + if attempt, err := mctx.Data().LatestAttempt(); err == nil { + attempt.SignEndTime = sdk.NowTime() + } + } + if err != nil { return out, metadata, &SigningError{Err: fmt.Errorf("failed to sign http request, %w", err)} }
diff --git a/aws/signer/v4/v4.go b/aws/signer/v4/v4.go index 4d162556bbf..bb61904e1d8 100644 --- a/aws/signer/v4/v4.go +++ b/aws/signer/v4/v4.go @@ -68,6 +68,9 @@ import ( const ( signingAlgorithm = "AWS4-HMAC-SHA256" authorizationHeader = "Authorization" + + // Version of signing v4 + Version = "SigV4" ) // HTTPSigner is an interface to a SigV4 signer that can sign HTTP requests @@ -103,6 +106,11 @@ type SignerOptions struct { // This will enable logging of the canonical request, the string to sign, and for presigning the subsequent // presigned URL. LogSigning bool + + // Disables setting the session token on the request as part of signing + // through X-Amz-Security-Token. This is needed for variations of v4 that + // present the token elsewhere. + DisableSessionToken bool } // Signer applies AWS v4 signing to given request.
Use this to sign requests @@ -136,6 +144,7 @@ type httpSigner struct { DisableHeaderHoisting bool DisableURIPathEscaping bool + DisableSessionToken bool } func (s *httpSigner) Build() (signedRequest, error) { @@ -284,6 +293,7 @@ func (s Signer) SignHTTP(ctx context.Context, credentials aws.Credentials, r *ht Time: v4Internal.NewSigningTime(signingTime.UTC()), DisableHeaderHoisting: options.DisableHeaderHoisting, DisableURIPathEscaping: options.DisableURIPathEscaping, + DisableSessionToken: options.DisableSessionToken, KeyDerivator: s.keyDerivator, } @@ -360,6 +370,7 @@ func (s *Signer) PresignHTTP( IsPreSign: true, DisableHeaderHoisting: options.DisableHeaderHoisting, DisableURIPathEscaping: options.DisableURIPathEscaping, + DisableSessionToken: options.DisableSessionToken, KeyDerivator: s.keyDerivator, } @@ -502,7 +513,8 @@ func (s *httpSigner) setRequiredSigningFields(headers http.Header, query url.Val if s.IsPreSign { query.Set(v4Internal.AmzAlgorithmKey, signingAlgorithm) - if sessionToken := s.Credentials.SessionToken; len(sessionToken) > 0 { + sessionToken := s.Credentials.SessionToken + if !s.DisableSessionToken && len(sessionToken) > 0 { query.Set("X-Amz-Security-Token", sessionToken) } @@ -512,7 +524,7 @@ func (s *httpSigner) setRequiredSigningFields(headers http.Header, query url.Val headers[v4Internal.AmzDateKey] = append(headers[v4Internal.AmzDateKey][:0], amzDate) - if len(s.Credentials.SessionToken) > 0 { + if !s.DisableSessionToken && len(s.Credentials.SessionToken) > 0 { headers[v4Internal.AmzSecurityTokenKey] = append(headers[v4Internal.AmzSecurityTokenKey][:0], s.Credentials.SessionToken) } } diff --git a/codegen/sdk-codegen/aws-models/s3.json b/codegen/sdk-codegen/aws-models/s3.json index e12979d7f67..c168e72a14a 100644 --- a/codegen/sdk-codegen/aws-models/s3.json +++ b/codegen/sdk-codegen/aws-models/s3.json @@ -60,7 +60,7 @@ } ], "traits": { - "smithy.api#documentation": "

This action aborts a multipart upload. After a multipart upload is aborted, no\n additional parts can be uploaded using that upload ID. The storage consumed by any\n previously uploaded parts will be freed. However, if any part uploads are currently in\n progress, those part uploads might or might not succeed. As a result, it might be necessary\n to abort a given multipart upload multiple times in order to completely free all storage\n consumed by all parts.\n To verify that all parts have been removed, so you don't get charged for the part\n storage, you should call the ListParts action and ensure that\n the parts list is empty.\n For information about permissions required to use the multipart upload, see Multipart Upload\n and Permissions.\n The following operations are related to AbortMultipartUpload:\n ",
+ "smithy.api#documentation": "This operation aborts a multipart upload. After a multipart upload is aborted, no\n additional parts can be uploaded using that upload ID. The storage consumed by any\n previously uploaded parts will be freed. However, if any part uploads are currently in\n progress, those part uploads might or might not succeed. As a result, it might be necessary\n to abort a given multipart upload multiple times in order to completely free all storage\n consumed by all parts.\n To verify that all parts have been removed and prevent getting charged for the part\n storage, you should call the ListParts API operation and ensure that\n the parts list is empty.\n Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the\n Amazon S3 User Guide.\n Permissions\n General purpose bucket permissions - For information about permissions required to use the multipart upload, see Multipart Upload\n and Permissions in the Amazon S3\n User Guide.\n Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. \n Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession.\n HTTP Host header syntax\n Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com.\n The following operations are related to AbortMultipartUpload:\n ",
"smithy.api#examples": [ { "title": "To abort a multipart upload", @@ -100,7 +100,7 @@ "Bucket": { "target": "com.amazonaws.s3#BucketName", "traits": { - "smithy.api#documentation": "The bucket name to which the upload was taking place.\n When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.\n When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide.",
+ "smithy.api#documentation": "The bucket name to which the upload was taking place.\n Directory buckets - When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. Path-style requests are not supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3). For information about bucket naming\n restrictions, see Directory bucket naming\n rules in the Amazon S3 User Guide.\n Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.\n Access points and Object Lambda access points are not supported by directory buckets.\n S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide.",
"smithy.api#httpLabel": {}, "smithy.api#required": {}, "smithy.rules#contextParam": { @@ -136,7 +136,7 @@ "ExpectedBucketOwner": { "target": "com.amazonaws.s3#AccountId", "traits": { - "smithy.api#documentation": "The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).",
+ "smithy.api#documentation": "The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).",
"smithy.api#httpHeader": "x-amz-expected-bucket-owner" } } @@ -201,6 +201,12 @@ "smithy.api#documentation": "A container for information about access control for replicas.
" } }, + "com.amazonaws.s3#AccessKeyIdValue": { + "type": "string" + }, + "com.amazonaws.s3#AccessPointAlias": { + "type": "boolean" + }, "com.amazonaws.s3#AccessPointArn": { "type": "string" }, @@ -256,6 +262,9 @@ { "target": "com.amazonaws.s3#CreateMultipartUpload" }, + { + "target": "com.amazonaws.s3#CreateSession" + }, { "target": "com.amazonaws.s3#DeleteBucket" }, @@ -415,6 +424,9 @@ { "target": "com.amazonaws.s3#ListBuckets" }, + { + "target": "com.amazonaws.s3#ListDirectoryBuckets" + }, { "target": "com.amazonaws.s3#ListMultipartUploads" }, @@ -559,6 +571,10 @@ "Accelerate": { "documentation": "Enables this client to use S3 Transfer Acceleration endpoints.", "type": "boolean" + }, + "DisableS3ExpressSessionAuth": { + "documentation": "Disables this client's usage of Session Auth for S3Express buckets and reverts to using conventional SigV4 for those.", + "type": "boolean" } }, "smithy.rules#endpointRuleSet": { @@ -648,6 +664,16 @@ "required": false, "documentation": "When an Access Point ARN is provided and this flag is enabled, the SDK MUST use the ARN's region when constructing the endpoint instead of the client's configured region.", "type": "Boolean" + }, + "UseS3ExpressControlEndpoint": { + "required": false, + "documentation": "Internal parameter to indicate whether S3Express operation should use control plane, (ex. CreateBucket)", + "type": "Boolean" + }, + "DisableS3ExpressSessionAuth": { + "required": false, + "documentation": "Internal parameter to indicate whether S3Express session auth should be disabled", + "type": "Boolean" } }, "rules": [ @@ -811,90 +837,91 @@ { "ref": "Bucket" }, - 49, - 50, - true - ], - "assign": "hardwareType" - }, - { - "fn": "substring", - "argv": [ - { - "ref": "Bucket" - }, - 8, - 12, + 0, + 6, true ], - "assign": "regionPrefix" + "assign": "bucketSuffix" }, { - "fn": "substring", + "fn": "stringEquals", "argv": [ { - "ref": "Bucket" + "ref": "bucketSuffix" }, - 0, - 7, - true - ], - "assign": "bucketAliasSuffix" - }, + "--x-s3" + ] + } + ], + "rules": [ { - "fn": "substring", - "argv": [ + "conditions": [ { - "ref": "Bucket" - }, - 32, - 49, - true + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } ], - "assign": "outpostId" + "error": "S3Express does not support Dual-stack.", + "type": "error" }, { - "fn": "aws.partition", - "argv": [ + "conditions": [ { - "ref": "Region" + "fn": "booleanEquals", + "argv": [ + { + "ref": "Accelerate" + }, + true + ] } ], - "assign": "regionPartition" + "error": "S3Express does not support S3 Accelerate.", + "type": "error" }, - { - "fn": "stringEquals", - "argv": [ - { - "ref": "bucketAliasSuffix" - }, - "--op-s3" - ] - } - ], - "rules": [ { "conditions": [ { - "fn": "isValidHostLabel", + "fn": "isSet", "argv": [ { - "ref": "outpostId" - }, - false + "ref": "Endpoint" + } ] + }, + { + "fn": "parseURL", + "argv": [ + { + "ref": "Endpoint" + } + ], + "assign": "url" } ], "rules": [ { "conditions": [ { - "fn": "stringEquals", + "fn": "isSet", "argv": [ { - "ref": "hardwareType" + "ref": "DisableS3ExpressSessionAuth" + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "DisableS3ExpressSessionAuth" }, - "e" + true ] } ], @@ -902,12 +929,18 @@ { "conditions": [ { - "fn": "stringEquals", + "fn": "booleanEquals", "argv": [ { - "ref": "regionPrefix" + "fn": "getAttr", + "argv": [ + { + "ref": "url" + }, + "isIp" + ] }, - "beta" + true ] } ], @@ -915,50 +948,65 @@ { "conditions": [ { - "fn": "not", + "fn": "uriEncode", "argv": [ { - "fn": "isSet", - "argv": [ + "ref": 
"Bucket" + } + ], + "assign": "uri_encoded_bucket" + } + ], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "{url#scheme}://{url#authority}/{uri_encoded_bucket}{url#path}", + "properties": { + "backend": "S3Express", + "authSchemes": [ { - "ref": "Endpoint" + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3express", + "signingRegion": "{Region}" } ] - } - ] + }, + "headers": {} + }, + "type": "endpoint" } ], - "error": "Expected a endpoint to be specified but no endpoint was found", - "type": "error" - }, + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [ { - "conditions": [ + "fn": "aws.isVirtualHostableS3Bucket", + "argv": [ { - "fn": "isSet", - "argv": [ - { - "ref": "Endpoint" - } - ] + "ref": "Bucket" }, - { - "fn": "parseURL", - "argv": [ - { - "ref": "Endpoint" - } - ], - "assign": "url" - } - ], + false + ] + } + ], + "rules": [ + { + "conditions": [], "endpoint": { - "url": "https://{Bucket}.ec2.{url#authority}", + "url": "{url#scheme}://{Bucket}.{url#authority}{url#path}", "properties": { + "backend": "S3Express", "authSchemes": [ { "disableDoubleEncoding": true, "name": "sigv4", - "signingName": "s3-outposts", + "signingName": "s3express", "signingRegion": "{Region}" } ] @@ -972,21 +1020,8 @@ }, { "conditions": [], - "endpoint": { - "url": "https://{Bucket}.ec2.s3-outposts.{Region}.{regionPartition#dnsSuffix}", - "properties": { - "authSchemes": [ - { - "disableDoubleEncoding": true, - "name": "sigv4", - "signingName": "s3-outposts", - "signingRegion": "{Region}" - } - ] - }, - "headers": {} - }, - "type": "endpoint" + "error": "S3Express bucket name is not a valid virtual hostable name.", + "type": "error" } ], "type": "tree" @@ -994,12 +1029,18 @@ { "conditions": [ { - "fn": "stringEquals", + "fn": "booleanEquals", "argv": [ { - "ref": "hardwareType" + "fn": "getAttr", + "argv": [ + { + "ref": "url" + }, + "isIp" + ] }, - "o" + true ] } ], @@ -1007,64 +1048,28 @@ { "conditions": [ { - "fn": "stringEquals", + "fn": "uriEncode", "argv": [ { - "ref": "regionPrefix" - }, - "beta" - ] + "ref": "Bucket" + } + ], + "assign": "uri_encoded_bucket" } ], "rules": [ { - "conditions": [ - { - "fn": "not", - "argv": [ - { - "fn": "isSet", - "argv": [ - { - "ref": "Endpoint" - } - ] - } - ] - } - ], - "error": "Expected a endpoint to be specified but no endpoint was found", - "type": "error" - }, - { - "conditions": [ - { - "fn": "isSet", - "argv": [ + "conditions": [], + "endpoint": { + "url": "{url#scheme}://{url#authority}/{uri_encoded_bucket}{url#path}", + "properties": { + "backend": "S3Express", + "authSchemes": [ { - "ref": "Endpoint" - } - ] - }, - { - "fn": "parseURL", - "argv": [ - { - "ref": "Endpoint" - } - ], - "assign": "url" - } - ], - "endpoint": { - "url": "https://{Bucket}.op-{outpostId}.{url#authority}", - "properties": { - "authSchemes": [ - { - "disableDoubleEncoding": true, - "name": "sigv4", - "signingName": "s3-outposts", - "signingRegion": "{Region}" + "disableDoubleEncoding": true, + "name": "sigv4-s3express", + "signingName": "s3express", + "signingRegion": "{Region}" } ] }, @@ -1074,17 +1079,34 @@ } ], "type": "tree" - }, + } + ], + "type": "tree" + }, + { + "conditions": [ + { + "fn": "aws.isVirtualHostableS3Bucket", + "argv": [ + { + "ref": "Bucket" + }, + false + ] + } + ], + "rules": [ { "conditions": [], "endpoint": { - "url": "https://{Bucket}.op-{outpostId}.s3-outposts.{Region}.{regionPartition#dnsSuffix}", + "url": "{url#scheme}://{Bucket}.{url#authority}{url#path}", "properties": { + "backend": 
"S3Express", "authSchemes": [ { "disableDoubleEncoding": true, - "name": "sigv4", - "signingName": "s3-outposts", + "name": "sigv4-s3express", + "signingName": "s3express", "signingRegion": "{Region}" } ] @@ -1098,50 +1120,49 @@ }, { "conditions": [], - "error": "Unrecognized hardware type: \"Expected hardware type o or e but got {hardwareType}\"", + "error": "S3Express bucket name is not a valid virtual hostable name.", "type": "error" } ], "type": "tree" }, - { - "conditions": [], - "error": "Invalid ARN: The outpost Id must only contain a-z, A-Z, 0-9 and `-`.", - "type": "error" - } - ], - "type": "tree" - }, - { - "conditions": [ - { - "fn": "isSet", - "argv": [ - { - "ref": "Bucket" - } - ] - } - ], - "rules": [ { "conditions": [ { "fn": "isSet", "argv": [ { - "ref": "Endpoint" + "ref": "UseS3ExpressControlEndpoint" } ] }, { - "fn": "not", + "fn": "booleanEquals", "argv": [ { - "fn": "isSet", + "ref": "UseS3ExpressControlEndpoint" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "uriEncode", "argv": [ { - "fn": "parseURL", + "ref": "Bucket" + } + ], + "assign": "uri_encoded_bucket" + }, + { + "fn": "not", + "argv": [ + { + "fn": "isSet", "argv": [ { "ref": "Endpoint" @@ -1150,23 +1171,64 @@ } ] } - ] + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "endpoint": { + "url": "https://s3express-control-fips.{Region}.amazonaws.com/{uri_encoded_bucket}", + "properties": { + "backend": "S3Express", + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3express", + "signingRegion": "{Region}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [], + "endpoint": { + "url": "https://s3express-control.{Region}.amazonaws.com/{uri_encoded_bucket}", + "properties": { + "backend": "S3Express", + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3express", + "signingRegion": "{Region}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" } ], - "error": "Custom endpoint `{Endpoint}` was not a valid URI", - "type": "error" + "type": "tree" }, { "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "ForcePathStyle" - }, - false - ] - }, { "fn": "aws.isVirtualHostableS3Bucket", "argv": [ @@ -1181,70 +1243,63 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "isSet", "argv": [ { - "ref": "Region" + "ref": "DisableS3ExpressSessionAuth" } - ], - "assign": "partitionResult" + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "DisableS3ExpressSessionAuth" + }, + true + ] } ], "rules": [ { "conditions": [ { - "fn": "isValidHostLabel", + "fn": "substring", "argv": [ { - "ref": "Region" + "ref": "Bucket" }, - false - ] - } - ], - "rules": [ + 6, + 14, + true + ], + "assign": "s3expressAvailabilityZoneId" + }, { - "conditions": [ + "fn": "substring", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "Accelerate" - }, - true - ] + "ref": "Bucket" }, - { - "fn": "stringEquals", - "argv": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "partitionResult" - }, - "name" - ] - }, - "aws-cn" - ] - } + 14, + 16, + true ], - "error": "S3 Accelerate cannot be used in this region", - "type": "error" + "assign": "s3expressAvailabilityZoneDelim" }, { - "conditions": [ + "fn": "stringEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] + "ref": "s3expressAvailabilityZoneDelim" }, + "--" + ] 
+ } + ], + "rules": [ + { + "conditions": [ { "fn": "booleanEquals", "argv": [ @@ -1253,48 +1308,18 @@ }, true ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "Accelerate" - }, - false - ] - }, - { - "fn": "not", - "argv": [ - { - "fn": "isSet", - "argv": [ - { - "ref": "Endpoint" - } - ] - } - ] - }, - { - "fn": "stringEquals", - "argv": [ - { - "ref": "Region" - }, - "aws-global" - ] } ], "endpoint": { - "url": "https://{Bucket}.s3-fips.dualstack.us-east-1.{partitionResult#dnsSuffix}", + "url": "https://{Bucket}.s3express-fips-{s3expressAvailabilityZoneId}.{Region}.amazonaws.com", "properties": { + "backend": "S3Express", "authSchemes": [ { "disableDoubleEncoding": true, "name": "sigv4", - "signingName": "s3", - "signingRegion": "us-east-1" + "signingName": "s3express", + "signingRegion": "{Region}" } ] }, @@ -1303,20 +1328,959 @@ "type": "endpoint" }, { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ + "conditions": [], + "endpoint": { + "url": "https://{Bucket}.s3express-{s3expressAvailabilityZoneId}.{Region}.amazonaws.com", + "properties": { + "backend": "S3Express", + "authSchemes": [ { - "ref": "UseDualStack" - }, - true + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3express", + "signingRegion": "{Region}" + } ] }, - { - "fn": "booleanEquals", - "argv": [ - { + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + }, + { + "conditions": [ + { + "fn": "substring", + "argv": [ + { + "ref": "Bucket" + }, + 6, + 15, + true + ], + "assign": "s3expressAvailabilityZoneId" + }, + { + "fn": "substring", + "argv": [ + { + "ref": "Bucket" + }, + 15, + 17, + true + ], + "assign": "s3expressAvailabilityZoneDelim" + }, + { + "fn": "stringEquals", + "argv": [ + { + "ref": "s3expressAvailabilityZoneDelim" + }, + "--" + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "endpoint": { + "url": "https://{Bucket}.s3express-fips-{s3expressAvailabilityZoneId}.{Region}.amazonaws.com", + "properties": { + "backend": "S3Express", + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3express", + "signingRegion": "{Region}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [], + "endpoint": { + "url": "https://{Bucket}.s3express-{s3expressAvailabilityZoneId}.{Region}.amazonaws.com", + "properties": { + "backend": "S3Express", + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3express", + "signingRegion": "{Region}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "Unrecognized S3Express bucket name format.", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [ + { + "fn": "substring", + "argv": [ + { + "ref": "Bucket" + }, + 6, + 14, + true + ], + "assign": "s3expressAvailabilityZoneId" + }, + { + "fn": "substring", + "argv": [ + { + "ref": "Bucket" + }, + 14, + 16, + true + ], + "assign": "s3expressAvailabilityZoneDelim" + }, + { + "fn": "stringEquals", + "argv": [ + { + "ref": "s3expressAvailabilityZoneDelim" + }, + "--" + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "endpoint": { + "url": "https://{Bucket}.s3express-fips-{s3expressAvailabilityZoneId}.{Region}.amazonaws.com", + "properties": { + "backend": "S3Express", + "authSchemes": [ + { + "disableDoubleEncoding": 
true, + "name": "sigv4-s3express", + "signingName": "s3express", + "signingRegion": "{Region}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [], + "endpoint": { + "url": "https://{Bucket}.s3express-{s3expressAvailabilityZoneId}.{Region}.amazonaws.com", + "properties": { + "backend": "S3Express", + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4-s3express", + "signingName": "s3express", + "signingRegion": "{Region}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + }, + { + "conditions": [ + { + "fn": "substring", + "argv": [ + { + "ref": "Bucket" + }, + 6, + 15, + true + ], + "assign": "s3expressAvailabilityZoneId" + }, + { + "fn": "substring", + "argv": [ + { + "ref": "Bucket" + }, + 15, + 17, + true + ], + "assign": "s3expressAvailabilityZoneDelim" + }, + { + "fn": "stringEquals", + "argv": [ + { + "ref": "s3expressAvailabilityZoneDelim" + }, + "--" + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "endpoint": { + "url": "https://{Bucket}.s3express-fips-{s3expressAvailabilityZoneId}.{Region}.amazonaws.com", + "properties": { + "backend": "S3Express", + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4-s3express", + "signingName": "s3express", + "signingRegion": "{Region}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [], + "endpoint": { + "url": "https://{Bucket}.s3express-{s3expressAvailabilityZoneId}.{Region}.amazonaws.com", + "properties": { + "backend": "S3Express", + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4-s3express", + "signingName": "s3express", + "signingRegion": "{Region}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "Unrecognized S3Express bucket name format.", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "S3Express bucket name is not a valid virtual hostable name.", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [ + { + "fn": "not", + "argv": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Bucket" + } + ] + } + ] + }, + { + "fn": "isSet", + "argv": [ + { + "ref": "UseS3ExpressControlEndpoint" + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseS3ExpressControlEndpoint" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + }, + { + "fn": "parseURL", + "argv": [ + { + "ref": "Endpoint" + } + ], + "assign": "url" + } + ], + "endpoint": { + "url": "{url#scheme}://{url#authority}{url#path}", + "properties": { + "backend": "S3Express", + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3express", + "signingRegion": "{Region}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "endpoint": { + "url": "https://s3express-control-fips.{Region}.amazonaws.com", + "properties": { + "backend": "S3Express", + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3express", + "signingRegion": "{Region}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [], + "endpoint": { + "url": "https://s3express-control.{Region}.amazonaws.com", + "properties": { + "backend": "S3Express", + 
"authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3express", + "signingRegion": "{Region}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + }, + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Bucket" + } + ] + }, + { + "fn": "substring", + "argv": [ + { + "ref": "Bucket" + }, + 49, + 50, + true + ], + "assign": "hardwareType" + }, + { + "fn": "substring", + "argv": [ + { + "ref": "Bucket" + }, + 8, + 12, + true + ], + "assign": "regionPrefix" + }, + { + "fn": "substring", + "argv": [ + { + "ref": "Bucket" + }, + 0, + 7, + true + ], + "assign": "bucketAliasSuffix" + }, + { + "fn": "substring", + "argv": [ + { + "ref": "Bucket" + }, + 32, + 49, + true + ], + "assign": "outpostId" + }, + { + "fn": "aws.partition", + "argv": [ + { + "ref": "Region" + } + ], + "assign": "regionPartition" + }, + { + "fn": "stringEquals", + "argv": [ + { + "ref": "bucketAliasSuffix" + }, + "--op-s3" + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "isValidHostLabel", + "argv": [ + { + "ref": "outpostId" + }, + false + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + { + "ref": "hardwareType" + }, + "e" + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + { + "ref": "regionPrefix" + }, + "beta" + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "not", + "argv": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + } + ] + } + ], + "error": "Expected a endpoint to be specified but no endpoint was found", + "type": "error" + }, + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + }, + { + "fn": "parseURL", + "argv": [ + { + "ref": "Endpoint" + } + ], + "assign": "url" + } + ], + "endpoint": { + "url": "https://{Bucket}.ec2.{url#authority}", + "properties": { + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3-outposts", + "signingRegion": "{Region}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + }, + { + "conditions": [], + "endpoint": { + "url": "https://{Bucket}.ec2.s3-outposts.{Region}.{regionPartition#dnsSuffix}", + "properties": { + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3-outposts", + "signingRegion": "{Region}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + }, + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + { + "ref": "hardwareType" + }, + "o" + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + { + "ref": "regionPrefix" + }, + "beta" + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "not", + "argv": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + } + ] + } + ], + "error": "Expected a endpoint to be specified but no endpoint was found", + "type": "error" + }, + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + }, + { + "fn": "parseURL", + "argv": [ + { + "ref": "Endpoint" + } + ], + "assign": "url" + } + ], + "endpoint": { + "url": "https://{Bucket}.op-{outpostId}.{url#authority}", + "properties": { + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3-outposts", + "signingRegion": "{Region}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + }, + { + "conditions": [], + "endpoint": { + "url": 
"https://{Bucket}.op-{outpostId}.s3-outposts.{Region}.{regionPartition#dnsSuffix}", + "properties": { + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3-outposts", + "signingRegion": "{Region}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "Unrecognized hardware type: \"Expected hardware type o or e but got {hardwareType}\"", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "Invalid ARN: The outpost Id must only contain a-z, A-Z, 0-9 and `-`.", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Bucket" + } + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + }, + { + "fn": "not", + "argv": [ + { + "fn": "isSet", + "argv": [ + { + "fn": "parseURL", + "argv": [ + { + "ref": "Endpoint" + } + ] + } + ] + } + ] + } + ], + "error": "Custom endpoint `{Endpoint}` was not a valid URI", + "type": "error" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "ForcePathStyle" + }, + false + ] + }, + { + "fn": "aws.isVirtualHostableS3Bucket", + "argv": [ + { + "ref": "Bucket" + }, + false + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "aws.partition", + "argv": [ + { + "ref": "Region" + } + ], + "assign": "partitionResult" + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "isValidHostLabel", + "argv": [ + { + "ref": "Region" + }, + false + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "Accelerate" + }, + true + ] + }, + { + "fn": "stringEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "partitionResult" + }, + "name" + ] + }, + "aws-cn" + ] + } + ], + "error": "S3 Accelerate cannot be used in this region", + "type": "error" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "Accelerate" + }, + false + ] + }, + { + "fn": "not", + "argv": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + } + ] + }, + { + "fn": "stringEquals", + "argv": [ + { + "ref": "Region" + }, + "aws-global" + ] + } + ], + "endpoint": { + "url": "https://{Bucket}.s3-fips.dualstack.us-east-1.{partitionResult#dnsSuffix}", + "properties": { + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3", + "signingRegion": "us-east-1" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { "ref": "UseFIPS" }, true @@ -12549,18 +13513,256 @@ { "name": "sigv4", "signingName": "s3", - "signingRegion": "us-west-2", + "signingRegion": "us-west-2", + "disableDoubleEncoding": true + } + ] + }, + "url": "https://s3.us-west-2.amazonaws.com/99a_b" + } + }, + "operationInputs": [ + { + "builtInParams": { + "AWS::Region": "us-west-2" + }, + "operationName": "GetObject", + "operationParams": { + "Bucket": "99a_b", + "Key": "key" + } + } + ], + "params": { + "Accelerate": false, + "Bucket": "99a_b", + "Region": "us-west-2", + "UseDualStack": false, + "UseFIPS": false + } + }, + { + "documentation": "vanilla path style@cn-north-1", + "expect": { 
+ "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3", + "signingRegion": "cn-north-1", + "disableDoubleEncoding": true + } + ] + }, + "url": "https://s3.cn-north-1.amazonaws.com.cn/bucket-name" + } + }, + "operationInputs": [ + { + "builtInParams": { + "AWS::Region": "cn-north-1", + "AWS::S3::ForcePathStyle": true + }, + "operationName": "GetObject", + "operationParams": { + "Bucket": "bucket-name", + "Key": "key" + } + } + ], + "params": { + "Accelerate": false, + "Bucket": "bucket-name", + "ForcePathStyle": true, + "Region": "cn-north-1", + "UseDualStack": false, + "UseFIPS": false + } + }, + { + "documentation": "path style + fips@cn-north-1", + "expect": { + "error": "Partition does not support FIPS" + }, + "operationInputs": [ + { + "builtInParams": { + "AWS::Region": "cn-north-1", + "AWS::UseFIPS": true, + "AWS::S3::ForcePathStyle": true + }, + "operationName": "GetObject", + "operationParams": { + "Bucket": "bucket-name", + "Key": "key" + } + } + ], + "params": { + "Accelerate": false, + "Bucket": "bucket-name", + "ForcePathStyle": true, + "Region": "cn-north-1", + "UseDualStack": false, + "UseFIPS": true + } + }, + { + "documentation": "path style + accelerate = error@cn-north-1", + "expect": { + "error": "Path-style addressing cannot be used with S3 Accelerate" + }, + "operationInputs": [ + { + "builtInParams": { + "AWS::Region": "cn-north-1", + "AWS::S3::ForcePathStyle": true, + "AWS::S3::Accelerate": true + }, + "operationName": "GetObject", + "operationParams": { + "Bucket": "bucket-name", + "Key": "key" + } + } + ], + "params": { + "Accelerate": true, + "Bucket": "bucket-name", + "ForcePathStyle": true, + "Region": "cn-north-1", + "UseDualStack": false, + "UseFIPS": false + } + }, + { + "documentation": "path style + dualstack@cn-north-1", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3", + "signingRegion": "cn-north-1", + "disableDoubleEncoding": true + } + ] + }, + "url": "https://s3.dualstack.cn-north-1.amazonaws.com.cn/bucket-name" + } + }, + "operationInputs": [ + { + "builtInParams": { + "AWS::Region": "cn-north-1", + "AWS::UseDualStack": true, + "AWS::S3::ForcePathStyle": true + }, + "operationName": "GetObject", + "operationParams": { + "Bucket": "bucket-name", + "Key": "key" + } + } + ], + "params": { + "Accelerate": false, + "Bucket": "bucket-name", + "ForcePathStyle": true, + "Region": "cn-north-1", + "UseDualStack": true, + "UseFIPS": false + } + }, + { + "documentation": "path style + arn is error@cn-north-1", + "expect": { + "error": "Path-style addressing cannot be used with ARN buckets" + }, + "operationInputs": [ + { + "builtInParams": { + "AWS::Region": "cn-north-1", + "AWS::S3::ForcePathStyle": true + }, + "operationName": "GetObject", + "operationParams": { + "Bucket": "arn:PARTITION:s3-outposts:REGION:123456789012:outpost:op-01234567890123456:bucket:mybucket", + "Key": "key" + } + } + ], + "params": { + "Accelerate": false, + "Bucket": "arn:PARTITION:s3-outposts:REGION:123456789012:outpost:op-01234567890123456:bucket:mybucket", + "ForcePathStyle": true, + "Region": "cn-north-1", + "UseDualStack": false, + "UseFIPS": false + } + }, + { + "documentation": "path style + invalid DNS name@cn-north-1", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3", + "signingRegion": "cn-north-1", + "disableDoubleEncoding": true + } + ] + }, + "url": "https://s3.cn-north-1.amazonaws.com.cn/99a_b" + } + }, + 
"operationInputs": [ + { + "builtInParams": { + "AWS::Region": "cn-north-1", + "AWS::S3::ForcePathStyle": true + }, + "operationName": "GetObject", + "operationParams": { + "Bucket": "99a_b", + "Key": "key" + } + } + ], + "params": { + "Accelerate": false, + "Bucket": "99a_b", + "ForcePathStyle": true, + "Region": "cn-north-1", + "UseDualStack": false, + "UseFIPS": false + } + }, + { + "documentation": "no path style + invalid DNS name@cn-north-1", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3", + "signingRegion": "cn-north-1", "disableDoubleEncoding": true } ] }, - "url": "https://s3.us-west-2.amazonaws.com/99a_b" + "url": "https://s3.cn-north-1.amazonaws.com.cn/99a_b" } }, "operationInputs": [ { "builtInParams": { - "AWS::Region": "us-west-2" + "AWS::Region": "cn-north-1" }, "operationName": "GetObject", "operationParams": { @@ -12572,13 +13774,13 @@ "params": { "Accelerate": false, "Bucket": "99a_b", - "Region": "us-west-2", + "Region": "cn-north-1", "UseDualStack": false, "UseFIPS": false } }, { - "documentation": "vanilla path style@cn-north-1", + "documentation": "vanilla path style@af-south-1", "expect": { "endpoint": { "properties": { @@ -12586,18 +13788,18 @@ { "name": "sigv4", "signingName": "s3", - "signingRegion": "cn-north-1", + "signingRegion": "af-south-1", "disableDoubleEncoding": true } ] }, - "url": "https://s3.cn-north-1.amazonaws.com.cn/bucket-name" + "url": "https://s3.af-south-1.amazonaws.com/bucket-name" } }, "operationInputs": [ { "builtInParams": { - "AWS::Region": "cn-north-1", + "AWS::Region": "af-south-1", "AWS::S3::ForcePathStyle": true }, "operationName": "GetObject", @@ -12611,20 +13813,32 @@ "Accelerate": false, "Bucket": "bucket-name", "ForcePathStyle": true, - "Region": "cn-north-1", + "Region": "af-south-1", "UseDualStack": false, "UseFIPS": false } }, { - "documentation": "path style + fips@cn-north-1", + "documentation": "path style + fips@af-south-1", "expect": { - "error": "Partition does not support FIPS" + "endpoint": { + "properties": { + "authSchemes": [ + { + "signingName": "s3", + "signingRegion": "af-south-1", + "disableDoubleEncoding": true, + "name": "sigv4" + } + ] + }, + "url": "https://s3-fips.af-south-1.amazonaws.com/bucket-name" + } }, "operationInputs": [ { "builtInParams": { - "AWS::Region": "cn-north-1", + "AWS::Region": "af-south-1", "AWS::UseFIPS": true, "AWS::S3::ForcePathStyle": true }, @@ -12639,20 +13853,20 @@ "Accelerate": false, "Bucket": "bucket-name", "ForcePathStyle": true, - "Region": "cn-north-1", + "Region": "af-south-1", "UseDualStack": false, "UseFIPS": true } }, { - "documentation": "path style + accelerate = error@cn-north-1", + "documentation": "path style + accelerate = error@af-south-1", "expect": { "error": "Path-style addressing cannot be used with S3 Accelerate" }, "operationInputs": [ { "builtInParams": { - "AWS::Region": "cn-north-1", + "AWS::Region": "af-south-1", "AWS::S3::ForcePathStyle": true, "AWS::S3::Accelerate": true }, @@ -12667,13 +13881,13 @@ "Accelerate": true, "Bucket": "bucket-name", "ForcePathStyle": true, - "Region": "cn-north-1", + "Region": "af-south-1", "UseDualStack": false, "UseFIPS": false } }, { - "documentation": "path style + dualstack@cn-north-1", + "documentation": "path style + dualstack@af-south-1", "expect": { "endpoint": { "properties": { @@ -12681,18 +13895,18 @@ { "name": "sigv4", "signingName": "s3", - "signingRegion": "cn-north-1", + "signingRegion": "af-south-1", "disableDoubleEncoding": true } ] }, - "url": 
"https://s3.dualstack.cn-north-1.amazonaws.com.cn/bucket-name" + "url": "https://s3.dualstack.af-south-1.amazonaws.com/bucket-name" } }, "operationInputs": [ { "builtInParams": { - "AWS::Region": "cn-north-1", + "AWS::Region": "af-south-1", "AWS::UseDualStack": true, "AWS::S3::ForcePathStyle": true }, @@ -12707,20 +13921,20 @@ "Accelerate": false, "Bucket": "bucket-name", "ForcePathStyle": true, - "Region": "cn-north-1", + "Region": "af-south-1", "UseDualStack": true, "UseFIPS": false } }, { - "documentation": "path style + arn is error@cn-north-1", + "documentation": "path style + arn is error@af-south-1", "expect": { "error": "Path-style addressing cannot be used with ARN buckets" }, "operationInputs": [ { "builtInParams": { - "AWS::Region": "cn-north-1", + "AWS::Region": "af-south-1", "AWS::S3::ForcePathStyle": true }, "operationName": "GetObject", @@ -12734,13 +13948,13 @@ "Accelerate": false, "Bucket": "arn:PARTITION:s3-outposts:REGION:123456789012:outpost:op-01234567890123456:bucket:mybucket", "ForcePathStyle": true, - "Region": "cn-north-1", + "Region": "af-south-1", "UseDualStack": false, "UseFIPS": false } }, { - "documentation": "path style + invalid DNS name@cn-north-1", + "documentation": "path style + invalid DNS name@af-south-1", "expect": { "endpoint": { "properties": { @@ -12748,18 +13962,18 @@ { "name": "sigv4", "signingName": "s3", - "signingRegion": "cn-north-1", + "signingRegion": "af-south-1", "disableDoubleEncoding": true } ] }, - "url": "https://s3.cn-north-1.amazonaws.com.cn/99a_b" + "url": "https://s3.af-south-1.amazonaws.com/99a_b" } }, "operationInputs": [ { "builtInParams": { - "AWS::Region": "cn-north-1", + "AWS::Region": "af-south-1", "AWS::S3::ForcePathStyle": true }, "operationName": "GetObject", @@ -12773,13 +13987,13 @@ "Accelerate": false, "Bucket": "99a_b", "ForcePathStyle": true, - "Region": "cn-north-1", + "Region": "af-south-1", "UseDualStack": false, "UseFIPS": false } }, { - "documentation": "no path style + invalid DNS name@cn-north-1", + "documentation": "no path style + invalid DNS name@af-south-1", "expect": { "endpoint": { "properties": { @@ -12787,18 +14001,18 @@ { "name": "sigv4", "signingName": "s3", - "signingRegion": "cn-north-1", + "signingRegion": "af-south-1", "disableDoubleEncoding": true } ] }, - "url": "https://s3.cn-north-1.amazonaws.com.cn/99a_b" + "url": "https://s3.af-south-1.amazonaws.com/99a_b" } }, "operationInputs": [ { "builtInParams": { - "AWS::Region": "cn-north-1" + "AWS::Region": "af-south-1" }, "operationName": "GetObject", "operationParams": { @@ -12810,13 +14024,13 @@ "params": { "Accelerate": false, "Bucket": "99a_b", - "Region": "cn-north-1", + "Region": "af-south-1", "UseDualStack": false, "UseFIPS": false } }, { - "documentation": "vanilla path style@af-south-1", + "documentation": "virtual addressing + private link@us-west-2", "expect": { "endpoint": { "properties": { @@ -12824,19 +14038,19 @@ { "name": "sigv4", "signingName": "s3", - "signingRegion": "af-south-1", + "signingRegion": "us-west-2", "disableDoubleEncoding": true } ] }, - "url": "https://s3.af-south-1.amazonaws.com/bucket-name" + "url": "http://bucket-name.control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com" } }, "operationInputs": [ { "builtInParams": { - "AWS::Region": "af-south-1", - "AWS::S3::ForcePathStyle": true + "AWS::Region": "us-west-2", + "SDK::Endpoint": "http://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com" }, "operationName": "GetObject", "operationParams": { @@ -12848,34 +14062,35 @@ "params": { "Accelerate": false, 
"Bucket": "bucket-name", - "ForcePathStyle": true, - "Region": "af-south-1", + "ForcePathStyle": false, + "Endpoint": "http://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com", + "Region": "us-west-2", "UseDualStack": false, "UseFIPS": false } }, { - "documentation": "path style + fips@af-south-1", + "documentation": "path style + private link@us-west-2", "expect": { "endpoint": { "properties": { "authSchemes": [ { + "name": "sigv4", "signingName": "s3", - "signingRegion": "af-south-1", - "disableDoubleEncoding": true, - "name": "sigv4" + "signingRegion": "us-west-2", + "disableDoubleEncoding": true } ] }, - "url": "https://s3-fips.af-south-1.amazonaws.com/bucket-name" + "url": "https://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com/bucket-name" } }, "operationInputs": [ { "builtInParams": { - "AWS::Region": "af-south-1", - "AWS::UseFIPS": true, + "AWS::Region": "us-west-2", + "SDK::Endpoint": "https://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com", "AWS::S3::ForcePathStyle": true }, "operationName": "GetObject", @@ -12889,21 +14104,80 @@ "Accelerate": false, "Bucket": "bucket-name", "ForcePathStyle": true, - "Region": "af-south-1", + "Endpoint": "https://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com", + "Region": "us-west-2", + "UseDualStack": false, + "UseFIPS": false + } + }, + { + "documentation": "SDK::Host + FIPS@us-west-2", + "expect": { + "error": "A custom endpoint cannot be combined with FIPS" + }, + "operationInputs": [ + { + "builtInParams": { + "AWS::Region": "us-west-2", + "AWS::UseFIPS": true, + "SDK::Endpoint": "https://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com" + }, + "operationName": "GetObject", + "operationParams": { + "Bucket": "bucket-name", + "Key": "key" + } + } + ], + "params": { + "Accelerate": false, + "Bucket": "bucket-name", + "ForcePathStyle": false, + "Endpoint": "https://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com", + "Region": "us-west-2", "UseDualStack": false, "UseFIPS": true } }, { - "documentation": "path style + accelerate = error@af-south-1", + "documentation": "SDK::Host + DualStack@us-west-2", "expect": { - "error": "Path-style addressing cannot be used with S3 Accelerate" + "error": "Cannot set dual-stack in combination with a custom endpoint." 
}, "operationInputs": [ { "builtInParams": { - "AWS::Region": "af-south-1", - "AWS::S3::ForcePathStyle": true, + "AWS::Region": "us-west-2", + "AWS::UseDualStack": true, + "SDK::Endpoint": "https://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com" + }, + "operationName": "GetObject", + "operationParams": { + "Bucket": "bucket-name", + "Key": "key" + } + } + ], + "params": { + "Accelerate": false, + "Bucket": "bucket-name", + "ForcePathStyle": false, + "Endpoint": "https://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com", + "Region": "us-west-2", + "UseDualStack": true, + "UseFIPS": false + } + }, + { + "documentation": "SDK::HOST + accelerate@us-west-2", + "expect": { + "error": "A custom endpoint cannot be combined with S3 Accelerate" + }, + "operationInputs": [ + { + "builtInParams": { + "AWS::Region": "us-west-2", + "SDK::Endpoint": "http://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com", "AWS::S3::Accelerate": true }, "operationName": "GetObject", @@ -12914,16 +14188,97 @@ } ], "params": { - "Accelerate": true, + "Accelerate": true, + "Bucket": "bucket-name", + "ForcePathStyle": false, + "Endpoint": "http://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com", + "Region": "us-west-2", + "UseDualStack": false, + "UseFIPS": false + } + }, + { + "documentation": "SDK::Host + access point ARN@us-west-2", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3", + "signingRegion": "us-west-2", + "disableDoubleEncoding": true + } + ] + }, + "url": "https://myendpoint-123456789012.beta.example.com" + } + }, + "operationInputs": [ + { + "builtInParams": { + "AWS::Region": "us-west-2", + "SDK::Endpoint": "https://beta.example.com" + }, + "operationName": "GetObject", + "operationParams": { + "Bucket": "arn:aws:s3:us-west-2:123456789012:accesspoint:myendpoint", + "Key": "key" + } + } + ], + "params": { + "Accelerate": false, + "Bucket": "arn:aws:s3:us-west-2:123456789012:accesspoint:myendpoint", + "ForcePathStyle": false, + "Endpoint": "https://beta.example.com", + "Region": "us-west-2", + "UseDualStack": false, + "UseFIPS": false + } + }, + { + "documentation": "virtual addressing + private link@cn-north-1", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3", + "signingRegion": "cn-north-1", + "disableDoubleEncoding": true + } + ] + }, + "url": "https://bucket-name.control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com" + } + }, + "operationInputs": [ + { + "builtInParams": { + "AWS::Region": "cn-north-1", + "SDK::Endpoint": "https://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com" + }, + "operationName": "GetObject", + "operationParams": { + "Bucket": "bucket-name", + "Key": "key" + } + } + ], + "params": { + "Accelerate": false, "Bucket": "bucket-name", - "ForcePathStyle": true, - "Region": "af-south-1", + "ForcePathStyle": false, + "Endpoint": "https://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com", + "Region": "cn-north-1", "UseDualStack": false, "UseFIPS": false } }, { - "documentation": "path style + dualstack@af-south-1", + "documentation": "path style + private link@cn-north-1", "expect": { "endpoint": { "properties": { @@ -12931,19 +14286,19 @@ { "name": "sigv4", "signingName": "s3", - "signingRegion": "af-south-1", + "signingRegion": "cn-north-1", "disableDoubleEncoding": true } ] }, - "url": "https://s3.dualstack.af-south-1.amazonaws.com/bucket-name" + "url": 
"https://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com/bucket-name" } }, "operationInputs": [ { "builtInParams": { - "AWS::Region": "af-south-1", - "AWS::UseDualStack": true, + "AWS::Region": "cn-north-1", + "SDK::Endpoint": "https://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com", "AWS::S3::ForcePathStyle": true }, "operationName": "GetObject", @@ -12957,79 +14312,72 @@ "Accelerate": false, "Bucket": "bucket-name", "ForcePathStyle": true, - "Region": "af-south-1", - "UseDualStack": true, + "Endpoint": "https://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com", + "Region": "cn-north-1", + "UseDualStack": false, "UseFIPS": false } }, { - "documentation": "path style + arn is error@af-south-1", + "documentation": "FIPS@cn-north-1", "expect": { - "error": "Path-style addressing cannot be used with ARN buckets" + "error": "Partition does not support FIPS" }, - "operationInputs": [ - { - "builtInParams": { - "AWS::Region": "af-south-1", - "AWS::S3::ForcePathStyle": true - }, - "operationName": "GetObject", - "operationParams": { - "Bucket": "arn:PARTITION:s3-outposts:REGION:123456789012:outpost:op-01234567890123456:bucket:mybucket", - "Key": "key" - } - } - ], "params": { "Accelerate": false, - "Bucket": "arn:PARTITION:s3-outposts:REGION:123456789012:outpost:op-01234567890123456:bucket:mybucket", - "ForcePathStyle": true, - "Region": "af-south-1", + "Bucket": "bucket-name", + "ForcePathStyle": false, + "Region": "cn-north-1", "UseDualStack": false, - "UseFIPS": false + "UseFIPS": true } }, { - "documentation": "path style + invalid DNS name@af-south-1", + "documentation": "SDK::Host + DualStack@cn-north-1", "expect": { - "endpoint": { - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "s3", - "signingRegion": "af-south-1", - "disableDoubleEncoding": true - } - ] - }, - "url": "https://s3.af-south-1.amazonaws.com/99a_b" - } + "error": "Cannot set dual-stack in combination with a custom endpoint." 
}, "operationInputs": [ { "builtInParams": { - "AWS::Region": "af-south-1", - "AWS::S3::ForcePathStyle": true + "AWS::Region": "cn-north-1", + "AWS::UseDualStack": true, + "SDK::Endpoint": "https://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com" }, "operationName": "GetObject", "operationParams": { - "Bucket": "99a_b", + "Bucket": "bucket-name", "Key": "key" } } ], "params": { "Accelerate": false, - "Bucket": "99a_b", - "ForcePathStyle": true, - "Region": "af-south-1", + "Bucket": "bucket-name", + "ForcePathStyle": false, + "Endpoint": "https://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com", + "Region": "cn-north-1", + "UseDualStack": true, + "UseFIPS": false + } + }, + { + "documentation": "SDK::HOST + accelerate@cn-north-1", + "expect": { + "error": "A custom endpoint cannot be combined with S3 Accelerate" + }, + "params": { + "Accelerate": true, + "Bucket": "bucket-name", + "ForcePathStyle": false, + "Endpoint": "https://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com", + "Region": "cn-north-1", "UseDualStack": false, "UseFIPS": false } }, { - "documentation": "no path style + invalid DNS name@af-south-1", + "documentation": "SDK::Host + access point ARN@cn-north-1", "expect": { "endpoint": { "properties": { @@ -13037,36 +14385,39 @@ { "name": "sigv4", "signingName": "s3", - "signingRegion": "af-south-1", + "signingRegion": "cn-north-1", "disableDoubleEncoding": true } ] }, - "url": "https://s3.af-south-1.amazonaws.com/99a_b" + "url": "https://myendpoint-123456789012.beta.example.com" } }, "operationInputs": [ { "builtInParams": { - "AWS::Region": "af-south-1" + "AWS::Region": "cn-north-1", + "SDK::Endpoint": "https://beta.example.com" }, "operationName": "GetObject", "operationParams": { - "Bucket": "99a_b", + "Bucket": "arn:aws-cn:s3:cn-north-1:123456789012:accesspoint:myendpoint", "Key": "key" } } ], "params": { "Accelerate": false, - "Bucket": "99a_b", - "Region": "af-south-1", + "Bucket": "arn:aws-cn:s3:cn-north-1:123456789012:accesspoint:myendpoint", + "ForcePathStyle": false, + "Endpoint": "https://beta.example.com", + "Region": "cn-north-1", "UseDualStack": false, "UseFIPS": false } }, { - "documentation": "virtual addressing + private link@us-west-2", + "documentation": "virtual addressing + private link@af-south-1", "expect": { "endpoint": { "properties": { @@ -13074,19 +14425,19 @@ { "name": "sigv4", "signingName": "s3", - "signingRegion": "us-west-2", + "signingRegion": "af-south-1", "disableDoubleEncoding": true } ] }, - "url": "http://bucket-name.control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com" + "url": "https://bucket-name.control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com" } }, "operationInputs": [ { "builtInParams": { - "AWS::Region": "us-west-2", - "SDK::Endpoint": "http://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com" + "AWS::Region": "af-south-1", + "SDK::Endpoint": "https://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com" }, "operationName": "GetObject", "operationParams": { @@ -13099,14 +14450,14 @@ "Accelerate": false, "Bucket": "bucket-name", "ForcePathStyle": false, - "Endpoint": "http://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com", - "Region": "us-west-2", + "Endpoint": "https://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com", + "Region": "af-south-1", "UseDualStack": false, "UseFIPS": false } }, { - "documentation": "path style + private link@us-west-2", + "documentation": "path style + private link@af-south-1", "expect": { "endpoint": { "properties": { @@ 
-13114,7 +14465,7 @@ { "name": "sigv4", "signingName": "s3", - "signingRegion": "us-west-2", + "signingRegion": "af-south-1", "disableDoubleEncoding": true } ] @@ -13125,7 +14476,7 @@ "operationInputs": [ { "builtInParams": { - "AWS::Region": "us-west-2", + "AWS::Region": "af-south-1", "SDK::Endpoint": "https://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com", "AWS::S3::ForcePathStyle": true }, @@ -13141,20 +14492,20 @@ "Bucket": "bucket-name", "ForcePathStyle": true, "Endpoint": "https://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com", - "Region": "us-west-2", + "Region": "af-south-1", "UseDualStack": false, "UseFIPS": false } }, { - "documentation": "SDK::Host + FIPS@us-west-2", + "documentation": "SDK::Host + FIPS@af-south-1", "expect": { "error": "A custom endpoint cannot be combined with FIPS" }, "operationInputs": [ { "builtInParams": { - "AWS::Region": "us-west-2", + "AWS::Region": "af-south-1", "AWS::UseFIPS": true, "SDK::Endpoint": "https://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com" }, @@ -13170,20 +14521,20 @@ "Bucket": "bucket-name", "ForcePathStyle": false, "Endpoint": "https://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com", - "Region": "us-west-2", + "Region": "af-south-1", "UseDualStack": false, "UseFIPS": true } }, { - "documentation": "SDK::Host + DualStack@us-west-2", + "documentation": "SDK::Host + DualStack@af-south-1", "expect": { "error": "Cannot set dual-stack in combination with a custom endpoint." }, "operationInputs": [ { "builtInParams": { - "AWS::Region": "us-west-2", + "AWS::Region": "af-south-1", "AWS::UseDualStack": true, "SDK::Endpoint": "https://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com" }, @@ -13199,21 +14550,21 @@ "Bucket": "bucket-name", "ForcePathStyle": false, "Endpoint": "https://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com", - "Region": "us-west-2", + "Region": "af-south-1", "UseDualStack": true, "UseFIPS": false } }, { - "documentation": "SDK::HOST + accelerate@us-west-2", + "documentation": "SDK::HOST + accelerate@af-south-1", "expect": { "error": "A custom endpoint cannot be combined with S3 Accelerate" }, "operationInputs": [ { "builtInParams": { - "AWS::Region": "us-west-2", - "SDK::Endpoint": "http://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com", + "AWS::Region": "af-south-1", + "SDK::Endpoint": "https://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com", "AWS::S3::Accelerate": true }, "operationName": "GetObject", @@ -13227,14 +14578,92 @@ "Accelerate": true, "Bucket": "bucket-name", "ForcePathStyle": false, - "Endpoint": "http://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com", + "Endpoint": "https://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com", + "Region": "af-south-1", + "UseDualStack": false, + "UseFIPS": false + } + }, + { + "documentation": "SDK::Host + access point ARN@af-south-1", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3", + "signingRegion": "af-south-1", + "disableDoubleEncoding": true + } + ] + }, + "url": "https://myendpoint-123456789012.beta.example.com" + } + }, + "operationInputs": [ + { + "builtInParams": { + "AWS::Region": "af-south-1", + "SDK::Endpoint": "https://beta.example.com" + }, + "operationName": "GetObject", + "operationParams": { + "Bucket": "arn:aws:s3:af-south-1:123456789012:accesspoint:myendpoint", + "Key": "key" + } + } + ], + "params": { + "Accelerate": false, + "Bucket": 
"arn:aws:s3:af-south-1:123456789012:accesspoint:myendpoint", + "ForcePathStyle": false, + "Endpoint": "https://beta.example.com", + "Region": "af-south-1", + "UseDualStack": false, + "UseFIPS": false + } + }, + { + "documentation": "vanilla access point arn@us-west-2", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3", + "signingRegion": "us-west-2", + "disableDoubleEncoding": true + } + ] + }, + "url": "https://myendpoint-123456789012.s3-accesspoint.us-west-2.amazonaws.com" + } + }, + "operationInputs": [ + { + "builtInParams": { + "AWS::Region": "us-west-2" + }, + "operationName": "GetObject", + "operationParams": { + "Bucket": "arn:aws:s3:us-west-2:123456789012:accesspoint:myendpoint", + "Key": "key" + } + } + ], + "params": { + "Accelerate": false, + "Bucket": "arn:aws:s3:us-west-2:123456789012:accesspoint:myendpoint", + "ForcePathStyle": false, "Region": "us-west-2", "UseDualStack": false, "UseFIPS": false } }, { - "documentation": "SDK::Host + access point ARN@us-west-2", + "documentation": "access point arn + FIPS@us-west-2", "expect": { "endpoint": { "properties": { @@ -13247,14 +14676,41 @@ } ] }, - "url": "https://myendpoint-123456789012.beta.example.com" + "url": "https://myendpoint-123456789012.s3-accesspoint-fips.us-west-2.amazonaws.com" + } + }, + "operationInputs": [ + { + "builtInParams": { + "AWS::Region": "us-west-2", + "AWS::UseFIPS": true + }, + "operationName": "GetObject", + "operationParams": { + "Bucket": "arn:aws:s3:us-west-2:123456789012:accesspoint:myendpoint", + "Key": "key" + } } + ], + "params": { + "Accelerate": false, + "Bucket": "arn:aws:s3:us-west-2:123456789012:accesspoint:myendpoint", + "ForcePathStyle": false, + "Region": "us-west-2", + "UseDualStack": false, + "UseFIPS": true + } + }, + { + "documentation": "access point arn + accelerate = error@us-west-2", + "expect": { + "error": "Access Points do not support S3 Accelerate" }, "operationInputs": [ { "builtInParams": { "AWS::Region": "us-west-2", - "SDK::Endpoint": "https://beta.example.com" + "AWS::S3::Accelerate": true }, "operationName": "GetObject", "operationParams": { @@ -13264,17 +14720,16 @@ } ], "params": { - "Accelerate": false, + "Accelerate": true, "Bucket": "arn:aws:s3:us-west-2:123456789012:accesspoint:myendpoint", "ForcePathStyle": false, - "Endpoint": "https://beta.example.com", "Region": "us-west-2", "UseDualStack": false, "UseFIPS": false } }, { - "documentation": "virtual addressing + private link@cn-north-1", + "documentation": "access point arn + FIPS + DualStack@us-west-2", "expect": { "endpoint": { "properties": { @@ -13282,39 +14737,39 @@ { "name": "sigv4", "signingName": "s3", - "signingRegion": "cn-north-1", + "signingRegion": "us-west-2", "disableDoubleEncoding": true } ] }, - "url": "https://bucket-name.control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com" + "url": "https://myendpoint-123456789012.s3-accesspoint-fips.dualstack.us-west-2.amazonaws.com" } }, "operationInputs": [ { "builtInParams": { - "AWS::Region": "cn-north-1", - "SDK::Endpoint": "https://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com" + "AWS::Region": "us-west-2", + "AWS::UseFIPS": true, + "AWS::UseDualStack": true }, "operationName": "GetObject", "operationParams": { - "Bucket": "bucket-name", + "Bucket": "arn:aws:s3:us-west-2:123456789012:accesspoint:myendpoint", "Key": "key" } } ], "params": { "Accelerate": false, - "Bucket": "bucket-name", + "Bucket": "arn:aws:s3:us-west-2:123456789012:accesspoint:myendpoint", 
"ForcePathStyle": false, - "Endpoint": "https://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com", - "Region": "cn-north-1", - "UseDualStack": false, - "UseFIPS": false + "Region": "us-west-2", + "UseDualStack": true, + "UseFIPS": true } }, { - "documentation": "path style + private link@cn-north-1", + "documentation": "vanilla access point arn@cn-north-1", "expect": { "endpoint": { "properties": { @@ -13327,41 +14782,38 @@ } ] }, - "url": "https://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com/bucket-name" + "url": "https://myendpoint-123456789012.s3-accesspoint.cn-north-1.amazonaws.com.cn" } }, "operationInputs": [ { "builtInParams": { - "AWS::Region": "cn-north-1", - "SDK::Endpoint": "https://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com", - "AWS::S3::ForcePathStyle": true + "AWS::Region": "cn-north-1" }, "operationName": "GetObject", "operationParams": { - "Bucket": "bucket-name", + "Bucket": "arn:aws-cn:s3:cn-north-1:123456789012:accesspoint:myendpoint", "Key": "key" } } ], "params": { "Accelerate": false, - "Bucket": "bucket-name", - "ForcePathStyle": true, - "Endpoint": "https://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com", + "Bucket": "arn:aws-cn:s3:cn-north-1:123456789012:accesspoint:myendpoint", + "ForcePathStyle": false, "Region": "cn-north-1", "UseDualStack": false, "UseFIPS": false } }, { - "documentation": "FIPS@cn-north-1", + "documentation": "access point arn + FIPS@cn-north-1", "expect": { "error": "Partition does not support FIPS" }, "params": { "Accelerate": false, - "Bucket": "bucket-name", + "Bucket": "arn:aws-cn:s3:cn-north-1:123456789012:accesspoint:myendpoint", "ForcePathStyle": false, "Region": "cn-north-1", "UseDualStack": false, @@ -13369,51 +14821,48 @@ } }, { - "documentation": "SDK::Host + DualStack@cn-north-1", + "documentation": "access point arn + accelerate = error@cn-north-1", "expect": { - "error": "Cannot set dual-stack in combination with a custom endpoint." 
+ "error": "Access Points do not support S3 Accelerate" }, "operationInputs": [ { "builtInParams": { "AWS::Region": "cn-north-1", - "AWS::UseDualStack": true, - "SDK::Endpoint": "https://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com" + "AWS::S3::Accelerate": true }, "operationName": "GetObject", "operationParams": { - "Bucket": "bucket-name", + "Bucket": "arn:aws-cn:s3:cn-north-1:123456789012:accesspoint:myendpoint", "Key": "key" } } ], "params": { - "Accelerate": false, - "Bucket": "bucket-name", + "Accelerate": true, + "Bucket": "arn:aws-cn:s3:cn-north-1:123456789012:accesspoint:myendpoint", "ForcePathStyle": false, - "Endpoint": "https://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com", "Region": "cn-north-1", - "UseDualStack": true, + "UseDualStack": false, "UseFIPS": false } }, { - "documentation": "SDK::HOST + accelerate@cn-north-1", + "documentation": "access point arn + FIPS + DualStack@cn-north-1", "expect": { - "error": "A custom endpoint cannot be combined with S3 Accelerate" + "error": "Partition does not support FIPS" }, "params": { - "Accelerate": true, - "Bucket": "bucket-name", + "Accelerate": false, + "Bucket": "arn:aws-cn:s3:cn-north-1:123456789012:accesspoint:myendpoint", "ForcePathStyle": false, - "Endpoint": "https://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com", "Region": "cn-north-1", - "UseDualStack": false, - "UseFIPS": false + "UseDualStack": true, + "UseFIPS": true } }, { - "documentation": "SDK::Host + access point ARN@cn-north-1", + "documentation": "vanilla access point arn@af-south-1", "expect": { "endpoint": { "properties": { @@ -13421,39 +14870,37 @@ { "name": "sigv4", "signingName": "s3", - "signingRegion": "cn-north-1", + "signingRegion": "af-south-1", "disableDoubleEncoding": true } ] }, - "url": "https://myendpoint-123456789012.beta.example.com" + "url": "https://myendpoint-123456789012.s3-accesspoint.af-south-1.amazonaws.com" } }, "operationInputs": [ { "builtInParams": { - "AWS::Region": "cn-north-1", - "SDK::Endpoint": "https://beta.example.com" + "AWS::Region": "af-south-1" }, "operationName": "GetObject", "operationParams": { - "Bucket": "arn:aws-cn:s3:cn-north-1:123456789012:accesspoint:myendpoint", + "Bucket": "arn:aws:s3:af-south-1:123456789012:accesspoint:myendpoint", "Key": "key" } } ], "params": { "Accelerate": false, - "Bucket": "arn:aws-cn:s3:cn-north-1:123456789012:accesspoint:myendpoint", + "Bucket": "arn:aws:s3:af-south-1:123456789012:accesspoint:myendpoint", "ForcePathStyle": false, - "Endpoint": "https://beta.example.com", - "Region": "cn-north-1", + "Region": "af-south-1", "UseDualStack": false, "UseFIPS": false } }, { - "documentation": "virtual addressing + private link@af-south-1", + "documentation": "access point arn + FIPS@af-south-1", "expect": { "endpoint": { "properties": { @@ -13466,34 +14913,60 @@ } ] }, - "url": "https://bucket-name.control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com" + "url": "https://myendpoint-123456789012.s3-accesspoint-fips.af-south-1.amazonaws.com" } }, "operationInputs": [ { "builtInParams": { "AWS::Region": "af-south-1", - "SDK::Endpoint": "https://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com" + "AWS::UseFIPS": true }, "operationName": "GetObject", "operationParams": { - "Bucket": "bucket-name", + "Bucket": "arn:aws:s3:af-south-1:123456789012:accesspoint:myendpoint", "Key": "key" } } ], "params": { "Accelerate": false, - "Bucket": "bucket-name", + "Bucket": "arn:aws:s3:af-south-1:123456789012:accesspoint:myendpoint", + "ForcePathStyle": 
false, + "Region": "af-south-1", + "UseDualStack": false, + "UseFIPS": true + } + }, + { + "documentation": "access point arn + accelerate = error@af-south-1", + "expect": { + "error": "Access Points do not support S3 Accelerate" + }, + "operationInputs": [ + { + "builtInParams": { + "AWS::Region": "af-south-1", + "AWS::S3::Accelerate": true + }, + "operationName": "GetObject", + "operationParams": { + "Bucket": "arn:aws:s3:af-south-1:123456789012:accesspoint:myendpoint", + "Key": "key" + } + } + ], + "params": { + "Accelerate": true, + "Bucket": "arn:aws:s3:af-south-1:123456789012:accesspoint:myendpoint", "ForcePathStyle": false, - "Endpoint": "https://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com", "Region": "af-south-1", "UseDualStack": false, "UseFIPS": false } }, { - "documentation": "path style + private link@af-south-1", + "documentation": "access point arn + FIPS + DualStack@af-south-1", "expect": { "endpoint": { "properties": { @@ -13506,874 +14979,954 @@ } ] }, - "url": "https://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com/bucket-name" + "url": "https://myendpoint-123456789012.s3-accesspoint-fips.dualstack.af-south-1.amazonaws.com" } }, "operationInputs": [ { "builtInParams": { "AWS::Region": "af-south-1", - "SDK::Endpoint": "https://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com", - "AWS::S3::ForcePathStyle": true + "AWS::UseFIPS": true, + "AWS::UseDualStack": true }, "operationName": "GetObject", "operationParams": { - "Bucket": "bucket-name", + "Bucket": "arn:aws:s3:af-south-1:123456789012:accesspoint:myendpoint", "Key": "key" } } ], "params": { "Accelerate": false, - "Bucket": "bucket-name", - "ForcePathStyle": true, - "Endpoint": "https://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com", + "Bucket": "arn:aws:s3:af-south-1:123456789012:accesspoint:myendpoint", + "ForcePathStyle": false, "Region": "af-south-1", - "UseDualStack": false, - "UseFIPS": false + "UseDualStack": true, + "UseFIPS": true } }, { - "documentation": "SDK::Host + FIPS@af-south-1", + "documentation": "S3 outposts vanilla test", "expect": { - "error": "A custom endpoint cannot be combined with FIPS" + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3-outposts", + "signingRegion": "us-west-2", + "disableDoubleEncoding": true + } + ] + }, + "url": "https://reports-123456789012.op-01234567890123456.s3-outposts.us-west-2.amazonaws.com" + } }, "operationInputs": [ { "builtInParams": { - "AWS::Region": "af-south-1", - "AWS::UseFIPS": true, - "SDK::Endpoint": "https://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com" + "AWS::Region": "us-west-2" }, "operationName": "GetObject", "operationParams": { - "Bucket": "bucket-name", + "Bucket": "arn:aws:s3-outposts:us-west-2:123456789012:outpost/op-01234567890123456/accesspoint/reports", "Key": "key" } } ], "params": { - "Accelerate": false, - "Bucket": "bucket-name", - "ForcePathStyle": false, - "Endpoint": "https://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com", - "Region": "af-south-1", + "Region": "us-west-2", + "UseFIPS": false, "UseDualStack": false, - "UseFIPS": true + "Accelerate": false, + "Bucket": "arn:aws:s3-outposts:us-west-2:123456789012:outpost/op-01234567890123456/accesspoint/reports" } }, { - "documentation": "SDK::Host + DualStack@af-south-1", + "documentation": "S3 outposts custom endpoint", "expect": { - "error": "Cannot set dual-stack in combination with a custom endpoint." 
+ "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3-outposts", + "signingRegion": "us-west-2", + "disableDoubleEncoding": true + } + ] + }, + "url": "https://reports-123456789012.op-01234567890123456.example.amazonaws.com" + } }, "operationInputs": [ { "builtInParams": { - "AWS::Region": "af-south-1", - "AWS::UseDualStack": true, - "SDK::Endpoint": "https://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com" + "AWS::Region": "us-west-2", + "SDK::Endpoint": "https://example.amazonaws.com" }, "operationName": "GetObject", "operationParams": { - "Bucket": "bucket-name", + "Bucket": "arn:aws:s3-outposts:us-west-2:123456789012:outpost/op-01234567890123456/accesspoint/reports", "Key": "key" } } ], "params": { + "Region": "us-west-2", + "UseFIPS": false, + "UseDualStack": false, "Accelerate": false, - "Bucket": "bucket-name", - "ForcePathStyle": false, - "Endpoint": "https://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com", - "Region": "af-south-1", - "UseDualStack": true, - "UseFIPS": false + "Bucket": "arn:aws:s3-outposts:us-west-2:123456789012:outpost/op-01234567890123456/accesspoint/reports", + "Endpoint": "https://example.amazonaws.com" } }, { - "documentation": "SDK::HOST + accelerate@af-south-1", + "documentation": "outposts arn with region mismatch and UseArnRegion=false", "expect": { - "error": "A custom endpoint cannot be combined with S3 Accelerate" + "error": "Invalid configuration: region from ARN `us-east-1` does not match client region `us-west-2` and UseArnRegion is `false`" }, "operationInputs": [ { "builtInParams": { - "AWS::Region": "af-south-1", - "SDK::Endpoint": "https://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com", - "AWS::S3::Accelerate": true + "AWS::Region": "us-west-2", + "AWS::S3::UseArnRegion": false }, "operationName": "GetObject", "operationParams": { - "Bucket": "bucket-name", + "Bucket": "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", "Key": "key" } } ], "params": { - "Accelerate": true, - "Bucket": "bucket-name", + "Accelerate": false, + "Bucket": "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", "ForcePathStyle": false, - "Endpoint": "https://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com", - "Region": "af-south-1", + "UseArnRegion": false, + "Region": "us-west-2", "UseDualStack": false, "UseFIPS": false } - }, - { - "documentation": "SDK::Host + access point ARN@af-south-1", - "expect": { - "endpoint": { - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "s3", - "signingRegion": "af-south-1", - "disableDoubleEncoding": true - } - ] - }, - "url": "https://myendpoint-123456789012.beta.example.com" - } + }, + { + "documentation": "outposts arn with region mismatch, custom region and UseArnRegion=false", + "expect": { + "error": "Invalid configuration: region from ARN `us-east-1` does not match client region `us-west-2` and UseArnRegion is `false`" }, "operationInputs": [ { "builtInParams": { - "AWS::Region": "af-south-1", - "SDK::Endpoint": "https://beta.example.com" + "AWS::Region": "us-west-2", + "SDK::Endpoint": "https://example.com", + "AWS::S3::UseArnRegion": false }, "operationName": "GetObject", "operationParams": { - "Bucket": "arn:aws:s3:af-south-1:123456789012:accesspoint:myendpoint", + "Bucket": "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", "Key": "key" } } ], "params": { 
"Accelerate": false, - "Bucket": "arn:aws:s3:af-south-1:123456789012:accesspoint:myendpoint", + "Bucket": "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", + "Endpoint": "https://example.com", "ForcePathStyle": false, - "Endpoint": "https://beta.example.com", - "Region": "af-south-1", + "UseArnRegion": false, + "Region": "us-west-2", "UseDualStack": false, "UseFIPS": false } }, { - "documentation": "vanilla access point arn@us-west-2", + "documentation": "outposts arn with region mismatch and UseArnRegion=true", "expect": { "endpoint": { "properties": { "authSchemes": [ { "name": "sigv4", - "signingName": "s3", - "signingRegion": "us-west-2", + "signingName": "s3-outposts", + "signingRegion": "us-east-1", "disableDoubleEncoding": true } ] }, - "url": "https://myendpoint-123456789012.s3-accesspoint.us-west-2.amazonaws.com" + "url": "https://myaccesspoint-123456789012.op-01234567890123456.s3-outposts.us-east-1.amazonaws.com" } }, "operationInputs": [ { "builtInParams": { - "AWS::Region": "us-west-2" + "AWS::Region": "us-west-2", + "AWS::S3::UseArnRegion": true }, "operationName": "GetObject", "operationParams": { - "Bucket": "arn:aws:s3:us-west-2:123456789012:accesspoint:myendpoint", + "Bucket": "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", "Key": "key" } } ], "params": { "Accelerate": false, - "Bucket": "arn:aws:s3:us-west-2:123456789012:accesspoint:myendpoint", + "Bucket": "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", "ForcePathStyle": false, + "UseArnRegion": true, "Region": "us-west-2", "UseDualStack": false, "UseFIPS": false } }, { - "documentation": "access point arn + FIPS@us-west-2", + "documentation": "outposts arn with region mismatch and UseArnRegion unset", "expect": { "endpoint": { "properties": { "authSchemes": [ { "name": "sigv4", - "signingName": "s3", - "signingRegion": "us-west-2", + "signingName": "s3-outposts", + "signingRegion": "us-east-1", "disableDoubleEncoding": true } ] }, - "url": "https://myendpoint-123456789012.s3-accesspoint-fips.us-west-2.amazonaws.com" + "url": "https://myaccesspoint-123456789012.op-01234567890123456.s3-outposts.us-east-1.amazonaws.com" } }, "operationInputs": [ { "builtInParams": { - "AWS::Region": "us-west-2", - "AWS::UseFIPS": true + "AWS::Region": "us-west-2" }, "operationName": "GetObject", "operationParams": { - "Bucket": "arn:aws:s3:us-west-2:123456789012:accesspoint:myendpoint", + "Bucket": "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", "Key": "key" } } ], "params": { "Accelerate": false, - "Bucket": "arn:aws:s3:us-west-2:123456789012:accesspoint:myendpoint", + "Bucket": "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", "ForcePathStyle": false, "Region": "us-west-2", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": false } }, { - "documentation": "access point arn + accelerate = error@us-west-2", + "documentation": "outposts arn with partition mismatch and UseArnRegion=true", "expect": { - "error": "Access Points do not support S3 Accelerate" + "error": "Client was configured for partition `aws` but ARN (`arn:aws:s3-outposts:cn-north-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint`) has `aws-cn`" }, "operationInputs": [ { "builtInParams": { "AWS::Region": "us-west-2", - "AWS::S3::Accelerate": true + "AWS::S3::UseArnRegion": true }, "operationName": 
"GetObject", "operationParams": { - "Bucket": "arn:aws:s3:us-west-2:123456789012:accesspoint:myendpoint", + "Bucket": "arn:aws:s3-outposts:cn-north-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", "Key": "key" } } ], "params": { - "Accelerate": true, - "Bucket": "arn:aws:s3:us-west-2:123456789012:accesspoint:myendpoint", + "Accelerate": false, + "Bucket": "arn:aws:s3-outposts:cn-north-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", "ForcePathStyle": false, + "UseArnRegion": true, "Region": "us-west-2", "UseDualStack": false, "UseFIPS": false } }, { - "documentation": "access point arn + FIPS + DualStack@us-west-2", + "documentation": "ARN with UseGlobalEndpoint and use-east-1 region uses the regional endpoint", "expect": { "endpoint": { "properties": { "authSchemes": [ { "name": "sigv4", - "signingName": "s3", - "signingRegion": "us-west-2", + "signingName": "s3-outposts", + "signingRegion": "us-east-1", "disableDoubleEncoding": true } ] }, - "url": "https://myendpoint-123456789012.s3-accesspoint-fips.dualstack.us-west-2.amazonaws.com" + "url": "https://reports-123456789012.op-01234567890123456.s3-outposts.us-east-1.amazonaws.com" } }, "operationInputs": [ { "builtInParams": { - "AWS::Region": "us-west-2", - "AWS::UseFIPS": true, - "AWS::UseDualStack": true + "AWS::Region": "us-east-1", + "AWS::S3::UseGlobalEndpoint": true }, "operationName": "GetObject", "operationParams": { - "Bucket": "arn:aws:s3:us-west-2:123456789012:accesspoint:myendpoint", + "Bucket": "arn:aws:s3-outposts:us-east-1:123456789012:outpost/op-01234567890123456/accesspoint/reports", "Key": "key" } } ], "params": { + "Region": "us-east-1", + "UseGlobalEndpoint": true, + "UseFIPS": false, + "UseDualStack": false, "Accelerate": false, - "Bucket": "arn:aws:s3:us-west-2:123456789012:accesspoint:myendpoint", - "ForcePathStyle": false, - "Region": "us-west-2", + "Bucket": "arn:aws:s3-outposts:us-east-1:123456789012:outpost/op-01234567890123456/accesspoint/reports" + } + }, + { + "documentation": "S3 outposts does not support dualstack", + "expect": { + "error": "S3 Outposts does not support Dual-stack" + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, "UseDualStack": true, - "UseFIPS": true + "Accelerate": false, + "Bucket": "arn:aws:s3-outposts:us-west-2:123456789012:outpost/op-01234567890123456/accesspoint/reports" } }, { - "documentation": "vanilla access point arn@cn-north-1", + "documentation": "S3 outposts does not support fips", + "expect": { + "error": "S3 Outposts does not support FIPS" + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false, + "Accelerate": false, + "Bucket": "arn:aws:s3-outposts:us-west-2:123456789012:outpost/op-01234567890123456/accesspoint/reports" + } + }, + { + "documentation": "S3 outposts does not support accelerate", + "expect": { + "error": "S3 Outposts does not support S3 Accelerate" + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false, + "Accelerate": true, + "Bucket": "arn:aws:s3-outposts:us-west-2:123456789012:outpost/op-01234567890123456/accesspoint/reports" + } + }, + { + "documentation": "validates against subresource", + "expect": { + "error": "Invalid Arn: Outpost Access Point ARN contains sub resources" + }, + "params": { + "Region": "us-west-2", + "UseFIPS": false, + "UseDualStack": false, + "Accelerate": false, + "Bucket": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:accesspoint:mybucket:object:foo" + } + }, + { + 
"documentation": "object lambda @us-east-1", "expect": { "endpoint": { "properties": { "authSchemes": [ { "name": "sigv4", - "signingName": "s3", - "signingRegion": "cn-north-1", + "signingName": "s3-object-lambda", + "signingRegion": "us-east-1", "disableDoubleEncoding": true } ] }, - "url": "https://myendpoint-123456789012.s3-accesspoint.cn-north-1.amazonaws.com.cn" + "url": "https://mybanner-123456789012.s3-object-lambda.us-east-1.amazonaws.com" } }, "operationInputs": [ { "builtInParams": { - "AWS::Region": "cn-north-1" + "AWS::Region": "us-east-1", + "AWS::S3::UseArnRegion": false }, "operationName": "GetObject", "operationParams": { - "Bucket": "arn:aws-cn:s3:cn-north-1:123456789012:accesspoint:myendpoint", + "Bucket": "arn:aws:s3-object-lambda:us-east-1:123456789012:accesspoint/mybanner", "Key": "key" } } ], "params": { - "Accelerate": false, - "Bucket": "arn:aws-cn:s3:cn-north-1:123456789012:accesspoint:myendpoint", - "ForcePathStyle": false, - "Region": "cn-north-1", + "Region": "us-east-1", + "UseFIPS": false, "UseDualStack": false, - "UseFIPS": false - } - }, - { - "documentation": "access point arn + FIPS@cn-north-1", - "expect": { - "error": "Partition does not support FIPS" - }, - "params": { "Accelerate": false, - "Bucket": "arn:aws-cn:s3:cn-north-1:123456789012:accesspoint:myendpoint", - "ForcePathStyle": false, - "Region": "cn-north-1", - "UseDualStack": false, - "UseFIPS": true + "UseArnRegion": false, + "Bucket": "arn:aws:s3-object-lambda:us-east-1:123456789012:accesspoint/mybanner" } }, { - "documentation": "access point arn + accelerate = error@cn-north-1", + "documentation": "object lambda @us-west-2", "expect": { - "error": "Access Points do not support S3 Accelerate" + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3-object-lambda", + "signingRegion": "us-west-2", + "disableDoubleEncoding": true + } + ] + }, + "url": "https://mybanner-123456789012.s3-object-lambda.us-west-2.amazonaws.com" + } }, "operationInputs": [ { "builtInParams": { - "AWS::Region": "cn-north-1", - "AWS::S3::Accelerate": true + "AWS::Region": "us-west-2", + "AWS::S3::UseArnRegion": false }, "operationName": "GetObject", "operationParams": { - "Bucket": "arn:aws-cn:s3:cn-north-1:123456789012:accesspoint:myendpoint", + "Bucket": "arn:aws:s3-object-lambda:us-west-2:123456789012:accesspoint/mybanner", "Key": "key" } } ], "params": { - "Accelerate": true, - "Bucket": "arn:aws-cn:s3:cn-north-1:123456789012:accesspoint:myendpoint", - "ForcePathStyle": false, - "Region": "cn-north-1", + "Region": "us-west-2", + "UseFIPS": false, "UseDualStack": false, - "UseFIPS": false + "Accelerate": false, + "UseArnRegion": false, + "Bucket": "arn:aws:s3-object-lambda:us-west-2:123456789012:accesspoint/mybanner" } }, { - "documentation": "access point arn + FIPS + DualStack@cn-north-1", + "documentation": "object lambda, colon resource deliminator @us-west-2", "expect": { - "error": "Partition does not support FIPS" + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3-object-lambda", + "signingRegion": "us-west-2", + "disableDoubleEncoding": true + } + ] + }, + "url": "https://mybanner-123456789012.s3-object-lambda.us-west-2.amazonaws.com" + } }, + "operationInputs": [ + { + "builtInParams": { + "AWS::Region": "us-west-2", + "AWS::S3::UseArnRegion": false + }, + "operationName": "GetObject", + "operationParams": { + "Bucket": "arn:aws:s3-object-lambda:us-west-2:123456789012:accesspoint:mybanner", + "Key": "key" + } + } + ], 
"params": { + "Region": "us-west-2", + "UseFIPS": false, + "UseDualStack": false, "Accelerate": false, - "Bucket": "arn:aws-cn:s3:cn-north-1:123456789012:accesspoint:myendpoint", - "ForcePathStyle": false, - "Region": "cn-north-1", - "UseDualStack": true, - "UseFIPS": true + "UseArnRegion": false, + "Bucket": "arn:aws:s3-object-lambda:us-west-2:123456789012:accesspoint:mybanner" } }, { - "documentation": "vanilla access point arn@af-south-1", + "documentation": "object lambda @us-east-1, client region us-west-2, useArnRegion=true", "expect": { "endpoint": { "properties": { "authSchemes": [ { "name": "sigv4", - "signingName": "s3", - "signingRegion": "af-south-1", + "signingName": "s3-object-lambda", + "signingRegion": "us-east-1", "disableDoubleEncoding": true } ] }, - "url": "https://myendpoint-123456789012.s3-accesspoint.af-south-1.amazonaws.com" + "url": "https://mybanner-123456789012.s3-object-lambda.us-east-1.amazonaws.com" } }, "operationInputs": [ { "builtInParams": { - "AWS::Region": "af-south-1" + "AWS::Region": "us-west-2", + "AWS::S3::UseArnRegion": true }, "operationName": "GetObject", "operationParams": { - "Bucket": "arn:aws:s3:af-south-1:123456789012:accesspoint:myendpoint", + "Bucket": "arn:aws:s3-object-lambda:us-east-1:123456789012:accesspoint/mybanner", "Key": "key" } } ], "params": { - "Accelerate": false, - "Bucket": "arn:aws:s3:af-south-1:123456789012:accesspoint:myendpoint", - "ForcePathStyle": false, - "Region": "af-south-1", + "Region": "us-west-2", + "UseFIPS": false, "UseDualStack": false, - "UseFIPS": false + "Accelerate": false, + "UseArnRegion": true, + "Bucket": "arn:aws:s3-object-lambda:us-east-1:123456789012:accesspoint/mybanner" } }, { - "documentation": "access point arn + FIPS@af-south-1", + "documentation": "object lambda @us-east-1, client region s3-external-1, useArnRegion=true", "expect": { "endpoint": { "properties": { "authSchemes": [ { "name": "sigv4", - "signingName": "s3", - "signingRegion": "af-south-1", + "signingName": "s3-object-lambda", + "signingRegion": "us-east-1", "disableDoubleEncoding": true } ] }, - "url": "https://myendpoint-123456789012.s3-accesspoint-fips.af-south-1.amazonaws.com" + "url": "https://mybanner-123456789012.s3-object-lambda.us-east-1.amazonaws.com" } }, "operationInputs": [ { "builtInParams": { - "AWS::Region": "af-south-1", - "AWS::UseFIPS": true + "AWS::Region": "s3-external-1", + "AWS::S3::UseArnRegion": true }, "operationName": "GetObject", "operationParams": { - "Bucket": "arn:aws:s3:af-south-1:123456789012:accesspoint:myendpoint", + "Bucket": "arn:aws:s3-object-lambda:us-east-1:123456789012:accesspoint/mybanner", "Key": "key" } } ], "params": { - "Accelerate": false, - "Bucket": "arn:aws:s3:af-south-1:123456789012:accesspoint:myendpoint", - "ForcePathStyle": false, - "Region": "af-south-1", + "Region": "s3-external-1", + "UseFIPS": false, "UseDualStack": false, - "UseFIPS": true + "Accelerate": false, + "UseArnRegion": true, + "Bucket": "arn:aws:s3-object-lambda:us-east-1:123456789012:accesspoint/mybanner" } }, { - "documentation": "access point arn + accelerate = error@af-south-1", + "documentation": "object lambda @us-east-1, client region s3-external-1, useArnRegion=false", "expect": { - "error": "Access Points do not support S3 Accelerate" + "error": "Invalid configuration: region from ARN `us-east-1` does not match client region `s3-external-1` and UseArnRegion is `false`" }, "operationInputs": [ { "builtInParams": { - "AWS::Region": "af-south-1", - "AWS::S3::Accelerate": true + "AWS::Region": 
"s3-external-1", + "AWS::S3::UseArnRegion": false }, "operationName": "GetObject", "operationParams": { - "Bucket": "arn:aws:s3:af-south-1:123456789012:accesspoint:myendpoint", + "Bucket": "arn:aws:s3-object-lambda:us-east-1:123456789012:accesspoint/mybanner", "Key": "key" } } ], "params": { - "Accelerate": true, - "Bucket": "arn:aws:s3:af-south-1:123456789012:accesspoint:myendpoint", - "ForcePathStyle": false, - "Region": "af-south-1", + "Region": "s3-external-1", + "UseFIPS": false, "UseDualStack": false, - "UseFIPS": false + "Accelerate": false, + "UseArnRegion": false, + "Bucket": "arn:aws:s3-object-lambda:us-east-1:123456789012:accesspoint/mybanner" } }, { - "documentation": "access point arn + FIPS + DualStack@af-south-1", + "documentation": "object lambda @us-east-1, client region aws-global, useArnRegion=true", "expect": { "endpoint": { "properties": { "authSchemes": [ { "name": "sigv4", - "signingName": "s3", - "signingRegion": "af-south-1", + "signingName": "s3-object-lambda", + "signingRegion": "us-east-1", "disableDoubleEncoding": true } ] }, - "url": "https://myendpoint-123456789012.s3-accesspoint-fips.dualstack.af-south-1.amazonaws.com" + "url": "https://mybanner-123456789012.s3-object-lambda.us-east-1.amazonaws.com" } }, "operationInputs": [ { "builtInParams": { - "AWS::Region": "af-south-1", - "AWS::UseFIPS": true, - "AWS::UseDualStack": true + "AWS::Region": "aws-global", + "AWS::S3::UseArnRegion": true }, "operationName": "GetObject", "operationParams": { - "Bucket": "arn:aws:s3:af-south-1:123456789012:accesspoint:myendpoint", + "Bucket": "arn:aws:s3-object-lambda:us-east-1:123456789012:accesspoint/mybanner", "Key": "key" } } ], "params": { + "Region": "aws-global", + "UseFIPS": false, + "UseDualStack": false, "Accelerate": false, - "Bucket": "arn:aws:s3:af-south-1:123456789012:accesspoint:myendpoint", - "ForcePathStyle": false, - "Region": "af-south-1", - "UseDualStack": true, - "UseFIPS": true + "UseArnRegion": true, + "Bucket": "arn:aws:s3-object-lambda:us-east-1:123456789012:accesspoint/mybanner" } }, { - "documentation": "S3 outposts vanilla test", + "documentation": "object lambda @us-east-1, client region aws-global, useArnRegion=false", "expect": { - "endpoint": { - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "s3-outposts", - "signingRegion": "us-west-2", - "disableDoubleEncoding": true - } - ] - }, - "url": "https://reports-123456789012.op-01234567890123456.s3-outposts.us-west-2.amazonaws.com" - } + "error": "Invalid configuration: region from ARN `us-east-1` does not match client region `aws-global` and UseArnRegion is `false`" }, "operationInputs": [ { "builtInParams": { - "AWS::Region": "us-west-2" + "AWS::Region": "aws-global", + "AWS::S3::UseArnRegion": false }, "operationName": "GetObject", "operationParams": { - "Bucket": "arn:aws:s3-outposts:us-west-2:123456789012:outpost/op-01234567890123456/accesspoint/reports", + "Bucket": "arn:aws:s3-object-lambda:us-east-1:123456789012:accesspoint/mybanner", "Key": "key" } } ], "params": { - "Region": "us-west-2", + "Region": "aws-global", "UseFIPS": false, "UseDualStack": false, "Accelerate": false, - "Bucket": "arn:aws:s3-outposts:us-west-2:123456789012:outpost/op-01234567890123456/accesspoint/reports" + "UseArnRegion": false, + "Bucket": "arn:aws:s3-object-lambda:us-east-1:123456789012:accesspoint/mybanner" } }, { - "documentation": "S3 outposts custom endpoint", + "documentation": "object lambda @cn-north-1, client region us-west-2 (cross partition), useArnRegion=true", "expect": { 
- "endpoint": { - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "s3-outposts", - "signingRegion": "us-west-2", - "disableDoubleEncoding": true - } - ] - }, - "url": "https://reports-123456789012.op-01234567890123456.example.amazonaws.com" - } + "error": "Client was configured for partition `aws` but ARN (`arn:aws-cn:s3-object-lambda:cn-north-1:123456789012:accesspoint/mybanner`) has `aws-cn`" }, "operationInputs": [ { "builtInParams": { - "AWS::Region": "us-west-2", - "SDK::Endpoint": "https://example.amazonaws.com" + "AWS::Region": "aws-global", + "AWS::S3::UseArnRegion": true }, "operationName": "GetObject", "operationParams": { - "Bucket": "arn:aws:s3-outposts:us-west-2:123456789012:outpost/op-01234567890123456/accesspoint/reports", + "Bucket": "arn:aws-cn:s3-object-lambda:cn-north-1:123456789012:accesspoint/mybanner", "Key": "key" } } ], "params": { - "Region": "us-west-2", + "Region": "aws-global", "UseFIPS": false, "UseDualStack": false, "Accelerate": false, - "Bucket": "arn:aws:s3-outposts:us-west-2:123456789012:outpost/op-01234567890123456/accesspoint/reports", - "Endpoint": "https://example.amazonaws.com" + "UseArnRegion": true, + "Bucket": "arn:aws-cn:s3-object-lambda:cn-north-1:123456789012:accesspoint/mybanner" } }, { - "documentation": "outposts arn with region mismatch and UseArnRegion=false", + "documentation": "object lambda with dualstack", "expect": { - "error": "Invalid configuration: region from ARN `us-east-1` does not match client region `us-west-2` and UseArnRegion is `false`" + "error": "S3 Object Lambda does not support Dual-stack" }, "operationInputs": [ { "builtInParams": { "AWS::Region": "us-west-2", + "AWS::UseDualStack": true, "AWS::S3::UseArnRegion": false }, "operationName": "GetObject", "operationParams": { - "Bucket": "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", + "Bucket": "arn:aws:s3-object-lambda:us-west-2:123456789012:accesspoint/mybanner", "Key": "key" } } ], "params": { + "Region": "us-west-2", + "UseFIPS": false, + "UseDualStack": true, "Accelerate": false, - "Bucket": "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", - "ForcePathStyle": false, "UseArnRegion": false, - "Region": "us-west-2", - "UseDualStack": false, - "UseFIPS": false + "Bucket": "arn:aws:s3-object-lambda:us-west-2:123456789012:accesspoint/mybanner" } }, { - "documentation": "outposts arn with region mismatch, custom region and UseArnRegion=false", + "documentation": "object lambda @us-gov-east-1", "expect": { - "error": "Invalid configuration: region from ARN `us-east-1` does not match client region `us-west-2` and UseArnRegion is `false`" - }, - "operationInputs": [ - { - "builtInParams": { - "AWS::Region": "us-west-2", - "SDK::Endpoint": "https://example.com", - "AWS::S3::UseArnRegion": false + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3-object-lambda", + "signingRegion": "us-gov-east-1", + "disableDoubleEncoding": true + } + ] }, - "operationName": "GetObject", - "operationParams": { - "Bucket": "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", - "Key": "key" - } + "url": "https://mybanner-123456789012.s3-object-lambda.us-gov-east-1.amazonaws.com" } - ], + }, "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": false, "Accelerate": false, - "Bucket": 
"arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", - "Endpoint": "https://example.com", - "ForcePathStyle": false, "UseArnRegion": false, - "Region": "us-west-2", - "UseDualStack": false, - "UseFIPS": false + "Bucket": "arn:aws-us-gov:s3-object-lambda:us-gov-east-1:123456789012:accesspoint/mybanner" } }, { - "documentation": "outposts arn with region mismatch and UseArnRegion=true", + "documentation": "object lambda @us-gov-east-1, with fips", "expect": { "endpoint": { "properties": { "authSchemes": [ { "name": "sigv4", - "signingName": "s3-outposts", - "signingRegion": "us-east-1", + "signingName": "s3-object-lambda", + "signingRegion": "us-gov-east-1", "disableDoubleEncoding": true } ] }, - "url": "https://myaccesspoint-123456789012.op-01234567890123456.s3-outposts.us-east-1.amazonaws.com" + "url": "https://mybanner-123456789012.s3-object-lambda-fips.us-gov-east-1.amazonaws.com" } }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": false, + "Accelerate": false, + "UseArnRegion": false, + "Bucket": "arn:aws-us-gov:s3-object-lambda:us-gov-east-1:123456789012:accesspoint/mybanner" + } + }, + { + "documentation": "object lambda @cn-north-1, with fips", + "expect": { + "error": "Partition does not support FIPS" + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": false, + "Accelerate": false, + "UseArnRegion": false, + "Bucket": "arn:aws-cn:s3-object-lambda:cn-north-1:123456789012:accesspoint/mybanner" + } + }, + { + "documentation": "object lambda with accelerate", + "expect": { + "error": "S3 Object Lambda does not support S3 Accelerate" + }, "operationInputs": [ { "builtInParams": { "AWS::Region": "us-west-2", - "AWS::S3::UseArnRegion": true + "AWS::S3::Accelerate": true, + "AWS::S3::UseArnRegion": false }, "operationName": "GetObject", "operationParams": { - "Bucket": "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", + "Bucket": "arn:aws:s3-object-lambda:us-west-2:123456789012:accesspoint/mybanner", "Key": "key" } } ], "params": { - "Accelerate": false, - "Bucket": "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", - "ForcePathStyle": false, - "UseArnRegion": true, "Region": "us-west-2", + "UseFIPS": false, "UseDualStack": false, - "UseFIPS": false + "Accelerate": true, + "UseArnRegion": false, + "Bucket": "arn:aws:s3-object-lambda:us-west-2:123456789012:accesspoint/mybanner" } }, { - "documentation": "outposts arn with region mismatch and UseArnRegion unset", + "documentation": "object lambda with invalid arn - bad service and someresource", "expect": { - "endpoint": { - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "s3-outposts", - "signingRegion": "us-east-1", - "disableDoubleEncoding": true - } - ] - }, - "url": "https://myaccesspoint-123456789012.op-01234567890123456.s3-outposts.us-east-1.amazonaws.com" - } + "error": "Invalid ARN: Unrecognized format: arn:aws:sqs:us-west-2:123456789012:someresource (type: someresource)" }, "operationInputs": [ { "builtInParams": { - "AWS::Region": "us-west-2" + "AWS::Region": "us-west-2", + "AWS::S3::UseArnRegion": false }, "operationName": "GetObject", "operationParams": { - "Bucket": "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", + "Bucket": "arn:aws:sqs:us-west-2:123456789012:someresource", "Key": "key" } } ], "params": { + "Region": "us-west-2", + 
"UseFIPS": false, + "UseDualStack": false, "Accelerate": false, - "Bucket": "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", - "ForcePathStyle": false, + "UseArnRegion": false, + "Bucket": "arn:aws:sqs:us-west-2:123456789012:someresource" + } + }, + { + "documentation": "object lambda with invalid arn - invalid resource", + "expect": { + "error": "Invalid ARN: Object Lambda ARNs only support `accesspoint` arn types, but found: `bucket_name`" + }, + "params": { "Region": "us-west-2", + "UseFIPS": false, "UseDualStack": false, - "UseFIPS": false + "Accelerate": false, + "UseArnRegion": false, + "Bucket": "arn:aws:s3-object-lambda:us-west-2:123456789012:bucket_name:mybucket" } }, { - "documentation": "outposts arn with partition mismatch and UseArnRegion=true", + "documentation": "object lambda with invalid arn - missing region", "expect": { - "error": "Client was configured for partition `aws` but ARN (`arn:aws:s3-outposts:cn-north-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint`) has `aws-cn`" + "error": "Invalid ARN: bucket ARN is missing a region" }, - "operationInputs": [ - { - "builtInParams": { - "AWS::Region": "us-west-2", - "AWS::S3::UseArnRegion": true - }, - "operationName": "GetObject", - "operationParams": { - "Bucket": "arn:aws:s3-outposts:cn-north-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", - "Key": "key" - } - } - ], "params": { + "Region": "us-west-2", + "UseFIPS": false, + "UseDualStack": false, "Accelerate": false, - "Bucket": "arn:aws:s3-outposts:cn-north-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", - "ForcePathStyle": false, - "UseArnRegion": true, + "UseArnRegion": false, + "Bucket": "arn:aws:s3-object-lambda::123456789012:accesspoint/mybanner" + } + }, + { + "documentation": "object lambda with invalid arn - missing account-id", + "expect": { + "error": "Invalid ARN: Missing account id" + }, + "params": { "Region": "us-west-2", + "UseFIPS": false, "UseDualStack": false, - "UseFIPS": false + "Accelerate": false, + "UseArnRegion": true, + "Bucket": "arn:aws:s3-object-lambda:us-west-2::accesspoint/mybanner" } }, { - "documentation": "ARN with UseGlobalEndpoint and use-east-1 region uses the regional endpoint", + "documentation": "object lambda with invalid arn - account id contains invalid characters", "expect": { - "endpoint": { - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "s3-outposts", - "signingRegion": "us-east-1", - "disableDoubleEncoding": true - } - ] - }, - "url": "https://reports-123456789012.op-01234567890123456.s3-outposts.us-east-1.amazonaws.com" - } + "error": "Invalid ARN: The account id may only contain a-z, A-Z, 0-9 and `-`. 
Found: `123.45678.9012`" }, "operationInputs": [ { "builtInParams": { - "AWS::Region": "us-east-1", - "AWS::S3::UseGlobalEndpoint": true + "AWS::Region": "us-west-2", + "AWS::S3::UseArnRegion": true }, "operationName": "GetObject", "operationParams": { - "Bucket": "arn:aws:s3-outposts:us-east-1:123456789012:outpost/op-01234567890123456/accesspoint/reports", + "Bucket": "arn:aws:s3-object-lambda:us-west-2:123.45678.9012:accesspoint:mybucket", "Key": "key" } } ], "params": { - "Region": "us-east-1", - "UseGlobalEndpoint": true, + "Region": "us-west-2", "UseFIPS": false, "UseDualStack": false, "Accelerate": false, - "Bucket": "arn:aws:s3-outposts:us-east-1:123456789012:outpost/op-01234567890123456/accesspoint/reports" + "UseArnRegion": true, + "Bucket": "arn:aws:s3-object-lambda:us-west-2:123.45678.9012:accesspoint:mybucket" } }, { - "documentation": "S3 outposts does not support dualstack", + "documentation": "object lambda with invalid arn - missing access point name", "expect": { - "error": "S3 Outposts does not support Dual-stack" + "error": "Invalid ARN: Expected a resource of the format `accesspoint:` but no name was provided" }, "params": { - "Region": "us-east-1", + "Region": "us-west-2", "UseFIPS": false, - "UseDualStack": true, + "UseDualStack": false, "Accelerate": false, - "Bucket": "arn:aws:s3-outposts:us-west-2:123456789012:outpost/op-01234567890123456/accesspoint/reports" + "UseArnRegion": true, + "Bucket": "arn:aws:s3-object-lambda:us-west-2:123456789012:accesspoint" } }, { - "documentation": "S3 outposts does not support fips", + "documentation": "object lambda with invalid arn - access point name contains invalid character: *", "expect": { - "error": "S3 Outposts does not support FIPS" + "error": "Invalid ARN: The access point name may only contain a-z, A-Z, 0-9 and `-`. Found: `*`" }, "params": { - "Region": "us-east-1", - "UseFIPS": true, + "Region": "us-west-2", + "UseFIPS": false, "UseDualStack": false, "Accelerate": false, - "Bucket": "arn:aws:s3-outposts:us-west-2:123456789012:outpost/op-01234567890123456/accesspoint/reports" + "UseArnRegion": true, + "Bucket": "arn:aws:s3-object-lambda:us-west-2:123456789012:accesspoint:*" } }, { - "documentation": "S3 outposts does not support accelerate", + "documentation": "object lambda with invalid arn - access point name contains invalid character: .", "expect": { - "error": "S3 Outposts does not support S3 Accelerate" + "error": "Invalid ARN: The access point name may only contain a-z, A-Z, 0-9 and `-`. Found: `my.bucket`" }, "params": { - "Region": "us-east-1", + "Region": "us-west-2", "UseFIPS": false, "UseDualStack": false, - "Accelerate": true, - "Bucket": "arn:aws:s3-outposts:us-west-2:123456789012:outpost/op-01234567890123456/accesspoint/reports" + "Accelerate": false, + "UseArnRegion": true, + "Bucket": "arn:aws:s3-object-lambda:us-west-2:123456789012:accesspoint:my.bucket" } }, { - "documentation": "validates against subresource", + "documentation": "object lambda with invalid arn - access point name contains sub resources", "expect": { - "error": "Invalid Arn: Outpost Access Point ARN contains sub resources" + "error": "Invalid ARN: The ARN may only contain a single resource component after `accesspoint`." 
}, "params": { "Region": "us-west-2", "UseFIPS": false, "UseDualStack": false, "Accelerate": false, - "Bucket": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:accesspoint:mybucket:object:foo" + "UseArnRegion": true, + "Bucket": "arn:aws:s3-object-lambda:us-west-2:123456789012:accesspoint:mybucket:object:foo" } }, { - "documentation": "object lambda @us-east-1", + "documentation": "object lambda with custom endpoint", "expect": { "endpoint": { "properties": { @@ -14381,52 +15934,42 @@ { "name": "sigv4", "signingName": "s3-object-lambda", - "signingRegion": "us-east-1", + "signingRegion": "us-west-2", "disableDoubleEncoding": true } ] }, - "url": "https://mybanner-123456789012.s3-object-lambda.us-east-1.amazonaws.com" + "url": "https://mybanner-123456789012.my-endpoint.com" } }, "operationInputs": [ { "builtInParams": { - "AWS::Region": "us-east-1", + "AWS::Region": "us-west-2", + "SDK::Endpoint": "https://my-endpoint.com", "AWS::S3::UseArnRegion": false }, "operationName": "GetObject", "operationParams": { - "Bucket": "arn:aws:s3-object-lambda:us-east-1:123456789012:accesspoint/mybanner", + "Bucket": "arn:aws:s3-object-lambda:us-west-2:123456789012:accesspoint/mybanner", "Key": "key" } } ], "params": { - "Region": "us-east-1", + "Region": "us-west-2", "UseFIPS": false, "UseDualStack": false, "Accelerate": false, "UseArnRegion": false, - "Bucket": "arn:aws:s3-object-lambda:us-east-1:123456789012:accesspoint/mybanner" + "Bucket": "arn:aws:s3-object-lambda:us-west-2:123456789012:accesspoint/mybanner", + "Endpoint": "https://my-endpoint.com" } }, { - "documentation": "object lambda @us-west-2", + "documentation": "object lambda arn with region mismatch and UseArnRegion=false", "expect": { - "endpoint": { - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "s3-object-lambda", - "signingRegion": "us-west-2", - "disableDoubleEncoding": true - } - ] - }, - "url": "https://mybanner-123456789012.s3-object-lambda.us-west-2.amazonaws.com" - } + "error": "Invalid configuration: region from ARN `us-east-1` does not match client region `us-west-2` and UseArnRegion is `false`" }, "operationInputs": [ { @@ -14436,22 +15979,23 @@ }, "operationName": "GetObject", "operationParams": { - "Bucket": "arn:aws:s3-object-lambda:us-west-2:123456789012:accesspoint/mybanner", + "Bucket": "arn:aws:s3-object-lambda:us-east-1:123456789012:accesspoint/mybanner", "Key": "key" } } ], "params": { - "Region": "us-west-2", - "UseFIPS": false, - "UseDualStack": false, "Accelerate": false, + "Bucket": "arn:aws:s3-object-lambda:us-east-1:123456789012:accesspoint/mybanner", + "ForcePathStyle": false, "UseArnRegion": false, - "Bucket": "arn:aws:s3-object-lambda:us-west-2:123456789012:accesspoint/mybanner" + "Region": "us-west-2", + "UseDualStack": false, + "UseFIPS": false } }, { - "documentation": "object lambda, colon resource deliminator @us-west-2", + "documentation": "WriteGetObjectResponse @ us-west-2", "expect": { "endpoint": { "properties": { @@ -14464,33 +16008,31 @@ } ] }, - "url": "https://mybanner-123456789012.s3-object-lambda.us-west-2.amazonaws.com" + "url": "https://s3-object-lambda.us-west-2.amazonaws.com" } }, "operationInputs": [ { "builtInParams": { - "AWS::Region": "us-west-2", - "AWS::S3::UseArnRegion": false + "AWS::Region": "us-west-2" }, - "operationName": "GetObject", + "operationName": "WriteGetObjectResponse", "operationParams": { - "Bucket": "arn:aws:s3-object-lambda:us-west-2:123456789012:accesspoint:mybanner", - "Key": "key" + "RequestRoute": 
"RequestRoute", + "RequestToken": "RequestToken" } } ], "params": { + "Accelerate": false, + "UseObjectLambdaEndpoint": true, "Region": "us-west-2", - "UseFIPS": false, "UseDualStack": false, - "Accelerate": false, - "UseArnRegion": false, - "Bucket": "arn:aws:s3-object-lambda:us-west-2:123456789012:accesspoint:mybanner" + "UseFIPS": false } }, { - "documentation": "object lambda @us-east-1, client region us-west-2, useArnRegion=true", + "documentation": "WriteGetObjectResponse with custom endpoint", "expect": { "endpoint": { "properties": { @@ -14498,38 +16040,38 @@ { "name": "sigv4", "signingName": "s3-object-lambda", - "signingRegion": "us-east-1", + "signingRegion": "us-west-2", "disableDoubleEncoding": true } ] }, - "url": "https://mybanner-123456789012.s3-object-lambda.us-east-1.amazonaws.com" + "url": "https://my-endpoint.com" } }, "operationInputs": [ { "builtInParams": { "AWS::Region": "us-west-2", - "AWS::S3::UseArnRegion": true + "SDK::Endpoint": "https://my-endpoint.com" }, - "operationName": "GetObject", + "operationName": "WriteGetObjectResponse", "operationParams": { - "Bucket": "arn:aws:s3-object-lambda:us-east-1:123456789012:accesspoint/mybanner", - "Key": "key" + "RequestRoute": "RequestRoute", + "RequestToken": "RequestToken" } } ], "params": { + "Accelerate": false, + "UseObjectLambdaEndpoint": true, + "Endpoint": "https://my-endpoint.com", "Region": "us-west-2", - "UseFIPS": false, "UseDualStack": false, - "Accelerate": false, - "UseArnRegion": true, - "Bucket": "arn:aws:s3-object-lambda:us-east-1:123456789012:accesspoint/mybanner" + "UseFIPS": false } }, { - "documentation": "object lambda @us-east-1, client region s3-external-1, useArnRegion=true", + "documentation": "WriteGetObjectResponse @ us-east-1", "expect": { "endpoint": { "properties": { @@ -14542,60 +16084,31 @@ } ] }, - "url": "https://mybanner-123456789012.s3-object-lambda.us-east-1.amazonaws.com" - } - }, - "operationInputs": [ - { - "builtInParams": { - "AWS::Region": "s3-external-1", - "AWS::S3::UseArnRegion": true - }, - "operationName": "GetObject", - "operationParams": { - "Bucket": "arn:aws:s3-object-lambda:us-east-1:123456789012:accesspoint/mybanner", - "Key": "key" - } + "url": "https://s3-object-lambda.us-east-1.amazonaws.com" } - ], - "params": { - "Region": "s3-external-1", - "UseFIPS": false, - "UseDualStack": false, - "Accelerate": false, - "UseArnRegion": true, - "Bucket": "arn:aws:s3-object-lambda:us-east-1:123456789012:accesspoint/mybanner" - } - }, - { - "documentation": "object lambda @us-east-1, client region s3-external-1, useArnRegion=false", - "expect": { - "error": "Invalid configuration: region from ARN `us-east-1` does not match client region `s3-external-1` and UseArnRegion is `false`" }, "operationInputs": [ { "builtInParams": { - "AWS::Region": "s3-external-1", - "AWS::S3::UseArnRegion": false + "AWS::Region": "us-east-1" }, - "operationName": "GetObject", + "operationName": "WriteGetObjectResponse", "operationParams": { - "Bucket": "arn:aws:s3-object-lambda:us-east-1:123456789012:accesspoint/mybanner", - "Key": "key" + "RequestRoute": "RequestRoute", + "RequestToken": "RequestToken" } } ], "params": { - "Region": "s3-external-1", - "UseFIPS": false, - "UseDualStack": false, "Accelerate": false, - "UseArnRegion": false, - "Bucket": "arn:aws:s3-object-lambda:us-east-1:123456789012:accesspoint/mybanner" + "UseObjectLambdaEndpoint": true, + "Region": "us-east-1", + "UseDualStack": false, + "UseFIPS": false } }, { - "documentation": "object lambda @us-east-1, client region 
aws-global, useArnRegion=true", + "documentation": "WriteGetObjectResponse with fips", "expect": { "endpoint": { "properties": { @@ -14608,974 +16121,1184 @@ } ] }, - "url": "https://mybanner-123456789012.s3-object-lambda.us-east-1.amazonaws.com" + "url": "https://s3-object-lambda-fips.us-east-1.amazonaws.com" } }, "operationInputs": [ { "builtInParams": { - "AWS::Region": "aws-global", - "AWS::S3::UseArnRegion": true + "AWS::Region": "us-east-1", + "AWS::UseFIPS": true }, - "operationName": "GetObject", + "operationName": "WriteGetObjectResponse", "operationParams": { - "Bucket": "arn:aws:s3-object-lambda:us-east-1:123456789012:accesspoint/mybanner", - "Key": "key" + "RequestRoute": "RequestRoute", + "RequestToken": "RequestToken" } } ], "params": { - "Region": "aws-global", - "UseFIPS": false, - "UseDualStack": false, "Accelerate": false, - "UseArnRegion": true, - "Bucket": "arn:aws:s3-object-lambda:us-east-1:123456789012:accesspoint/mybanner" + "UseObjectLambdaEndpoint": true, + "Region": "us-east-1", + "UseDualStack": false, + "UseFIPS": true } }, { - "documentation": "object lambda @us-east-1, client region aws-global, useArnRegion=false", + "documentation": "WriteGetObjectResponse with dualstack", "expect": { - "error": "Invalid configuration: region from ARN `us-east-1` does not match client region `aws-global` and UseArnRegion is `false`" + "error": "S3 Object Lambda does not support Dual-stack" }, "operationInputs": [ { "builtInParams": { - "AWS::Region": "aws-global", - "AWS::S3::UseArnRegion": false + "AWS::Region": "us-east-1", + "AWS::UseDualStack": true }, - "operationName": "GetObject", + "operationName": "WriteGetObjectResponse", "operationParams": { - "Bucket": "arn:aws:s3-object-lambda:us-east-1:123456789012:accesspoint/mybanner", - "Key": "key" + "RequestRoute": "RequestRoute", + "RequestToken": "RequestToken" } } ], "params": { - "Region": "aws-global", - "UseFIPS": false, + "Accelerate": false, + "UseObjectLambdaEndpoint": true, + "Region": "us-east-1", + "UseDualStack": true, + "UseFIPS": false + } + }, + { + "documentation": "WriteGetObjectResponse with accelerate", + "expect": { + "error": "S3 Object Lambda does not support S3 Accelerate" + }, + "params": { + "Accelerate": true, + "UseObjectLambdaEndpoint": true, + "Region": "us-east-1", "UseDualStack": false, + "UseFIPS": false + } + }, + { + "documentation": "WriteGetObjectResponse with fips in CN", + "expect": { + "error": "Partition does not support FIPS" + }, + "params": { "Accelerate": false, - "UseArnRegion": false, - "Bucket": "arn:aws:s3-object-lambda:us-east-1:123456789012:accesspoint/mybanner" + "Region": "cn-north-1", + "UseObjectLambdaEndpoint": true, + "UseDualStack": false, + "UseFIPS": true } }, { - "documentation": "object lambda @cn-north-1, client region us-west-2 (cross partition), useArnRegion=true", + "documentation": "WriteGetObjectResponse with invalid partition", "expect": { - "error": "Client was configured for partition `aws` but ARN (`arn:aws-cn:s3-object-lambda:cn-north-1:123456789012:accesspoint/mybanner`) has `aws-cn`" + "error": "Invalid region: region was not a valid DNS name." 
}, - "operationInputs": [ - { - "builtInParams": { - "AWS::Region": "aws-global", - "AWS::S3::UseArnRegion": true + "params": { + "Accelerate": false, + "UseObjectLambdaEndpoint": true, + "Region": "not a valid DNS name", + "UseDualStack": false, + "UseFIPS": false + } + }, + { + "documentation": "WriteGetObjectResponse with an unknown partition", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3-object-lambda", + "disableDoubleEncoding": true, + "signingRegion": "us-east.special" + } + ] }, - "operationName": "GetObject", - "operationParams": { - "Bucket": "arn:aws-cn:s3-object-lambda:cn-north-1:123456789012:accesspoint/mybanner", - "Key": "key" - } + "url": "https://s3-object-lambda.us-east.special.amazonaws.com" } - ], + }, "params": { - "Region": "aws-global", - "UseFIPS": false, - "UseDualStack": false, "Accelerate": false, - "UseArnRegion": true, - "Bucket": "arn:aws-cn:s3-object-lambda:cn-north-1:123456789012:accesspoint/mybanner" + "UseObjectLambdaEndpoint": true, + "Region": "us-east.special", + "UseDualStack": false, + "UseFIPS": false } }, { - "documentation": "object lambda with dualstack", + "documentation": "S3 Outposts bucketAlias Real Outpost Prod us-west-1", "expect": { - "error": "S3 Object Lambda does not support Dual-stack" - }, - "operationInputs": [ - { - "builtInParams": { - "AWS::Region": "us-west-2", - "AWS::UseDualStack": true, - "AWS::S3::UseArnRegion": false + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3-outposts", + "signingRegion": "us-west-1", + "disableDoubleEncoding": true + } + ] }, - "operationName": "GetObject", - "operationParams": { - "Bucket": "arn:aws:s3-object-lambda:us-west-2:123456789012:accesspoint/mybanner", - "Key": "key" - } + "url": "https://test-accessp-o0b1d075431d83bebde8xz5w8ijx1qzlbp3i3kuse10--op-s3.op-0b1d075431d83bebd.s3-outposts.us-west-1.amazonaws.com" } - ], + }, "params": { - "Region": "us-west-2", + "Region": "us-west-1", + "Bucket": "test-accessp-o0b1d075431d83bebde8xz5w8ijx1qzlbp3i3kuse10--op-s3", "UseFIPS": false, - "UseDualStack": true, - "Accelerate": false, - "UseArnRegion": false, - "Bucket": "arn:aws:s3-object-lambda:us-west-2:123456789012:accesspoint/mybanner" + "UseDualStack": false, + "Accelerate": false } }, { - "documentation": "object lambda @us-gov-east-1", + "documentation": "S3 Outposts bucketAlias Real Outpost Prod ap-east-1", "expect": { "endpoint": { "properties": { "authSchemes": [ { "name": "sigv4", - "signingName": "s3-object-lambda", - "signingRegion": "us-gov-east-1", + "signingName": "s3-outposts", + "signingRegion": "ap-east-1", "disableDoubleEncoding": true } ] }, - "url": "https://mybanner-123456789012.s3-object-lambda.us-gov-east-1.amazonaws.com" + "url": "https://test-accessp-o0b1d075431d83bebde8xz5w8ijx1qzlbp3i3kuse10--op-s3.op-0b1d075431d83bebd.s3-outposts.ap-east-1.amazonaws.com" } }, "params": { - "Region": "us-gov-east-1", + "Region": "ap-east-1", + "Bucket": "test-accessp-o0b1d075431d83bebde8xz5w8ijx1qzlbp3i3kuse10--op-s3", "UseFIPS": false, "UseDualStack": false, - "Accelerate": false, - "UseArnRegion": false, - "Bucket": "arn:aws-us-gov:s3-object-lambda:us-gov-east-1:123456789012:accesspoint/mybanner" + "Accelerate": false } }, { - "documentation": "object lambda @us-gov-east-1, with fips", + "documentation": "S3 Outposts bucketAlias Ec2 Outpost Prod us-east-1", "expect": { "endpoint": { "properties": { "authSchemes": [ { "name": "sigv4", - "signingName": "s3-object-lambda", - 
"signingRegion": "us-gov-east-1", + "signingName": "s3-outposts", + "signingRegion": "us-east-1", "disableDoubleEncoding": true } ] }, - "url": "https://mybanner-123456789012.s3-object-lambda-fips.us-gov-east-1.amazonaws.com" + "url": "https://test-accessp-e0000075431d83bebde8xz5w8ijx1qzlbp3i3kuse10--op-s3.ec2.s3-outposts.us-east-1.amazonaws.com" } }, "params": { - "Region": "us-gov-east-1", - "UseFIPS": true, + "Region": "us-east-1", + "Bucket": "test-accessp-e0000075431d83bebde8xz5w8ijx1qzlbp3i3kuse10--op-s3", + "UseFIPS": false, "UseDualStack": false, - "Accelerate": false, - "UseArnRegion": false, - "Bucket": "arn:aws-us-gov:s3-object-lambda:us-gov-east-1:123456789012:accesspoint/mybanner" + "Accelerate": false } }, { - "documentation": "object lambda @cn-north-1, with fips", + "documentation": "S3 Outposts bucketAlias Ec2 Outpost Prod me-south-1", "expect": { - "error": "Partition does not support FIPS" + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3-outposts", + "signingRegion": "me-south-1", + "disableDoubleEncoding": true + } + ] + }, + "url": "https://test-accessp-e0000075431d83bebde8xz5w8ijx1qzlbp3i3kuse10--op-s3.ec2.s3-outposts.me-south-1.amazonaws.com" + } }, "params": { - "Region": "cn-north-1", - "UseFIPS": true, + "Region": "me-south-1", + "Bucket": "test-accessp-e0000075431d83bebde8xz5w8ijx1qzlbp3i3kuse10--op-s3", + "UseFIPS": false, "UseDualStack": false, - "Accelerate": false, - "UseArnRegion": false, - "Bucket": "arn:aws-cn:s3-object-lambda:cn-north-1:123456789012:accesspoint/mybanner" + "Accelerate": false } }, { - "documentation": "object lambda with accelerate", + "documentation": "S3 Outposts bucketAlias Real Outpost Beta", "expect": { - "error": "S3 Object Lambda does not support S3 Accelerate" + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3-outposts", + "signingRegion": "us-east-1", + "disableDoubleEncoding": true + } + ] + }, + "url": "https://test-accessp-o0b1d075431d83bebde8xz5w8ijx1qzlbp3i3kbeta0--op-s3.op-0b1d075431d83bebd.example.amazonaws.com" + } }, - "operationInputs": [ - { - "builtInParams": { - "AWS::Region": "us-west-2", - "AWS::S3::Accelerate": true, - "AWS::S3::UseArnRegion": false + "params": { + "Region": "us-east-1", + "Bucket": "test-accessp-o0b1d075431d83bebde8xz5w8ijx1qzlbp3i3kbeta0--op-s3", + "Endpoint": "https://example.amazonaws.com", + "UseFIPS": false, + "UseDualStack": false, + "Accelerate": false + } + }, + { + "documentation": "S3 Outposts bucketAlias Ec2 Outpost Beta", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3-outposts", + "signingRegion": "us-east-1", + "disableDoubleEncoding": true + } + ] }, - "operationName": "GetObject", - "operationParams": { - "Bucket": "arn:aws:s3-object-lambda:us-west-2:123456789012:accesspoint/mybanner", - "Key": "key" - } + "url": "https://161743052723-e00000136899934034jeahy1t8gpzpbwjj8kb7beta0--op-s3.ec2.example.amazonaws.com" } - ], + }, "params": { - "Region": "us-west-2", + "Region": "us-east-1", + "Bucket": "161743052723-e00000136899934034jeahy1t8gpzpbwjj8kb7beta0--op-s3", + "Endpoint": "https://example.amazonaws.com", "UseFIPS": false, "UseDualStack": false, - "Accelerate": true, - "UseArnRegion": false, - "Bucket": "arn:aws:s3-object-lambda:us-west-2:123456789012:accesspoint/mybanner" + "Accelerate": false } }, { - "documentation": "object lambda with invalid arn - bad service and someresource", + "documentation": "S3 Outposts 
bucketAlias - No endpoint set for beta", "expect": { - "error": "Invalid ARN: Unrecognized format: arn:aws:sqs:us-west-2:123456789012:someresource (type: someresource)" + "error": "Expected a endpoint to be specified but no endpoint was found" }, - "operationInputs": [ - { - "builtInParams": { - "AWS::Region": "us-west-2", - "AWS::S3::UseArnRegion": false - }, - "operationName": "GetObject", - "operationParams": { - "Bucket": "arn:aws:sqs:us-west-2:123456789012:someresource", - "Key": "key" - } - } - ], "params": { - "Region": "us-west-2", + "Region": "us-east-1", + "Bucket": "test-accessp-o0b1d075431d83bebde8xz5w8ijx1qzlbp3i3kbeta0--op-s3", "UseFIPS": false, "UseDualStack": false, - "Accelerate": false, - "UseArnRegion": false, - "Bucket": "arn:aws:sqs:us-west-2:123456789012:someresource" + "Accelerate": false } }, { - "documentation": "object lambda with invalid arn - invalid resource", + "documentation": "S3 Outposts bucketAlias Invalid hardware type", "expect": { - "error": "Invalid ARN: Object Lambda ARNs only support `accesspoint` arn types, but found: `bucket_name`" + "error": "Unrecognized hardware type: \"Expected hardware type o or e but got h\"" }, "params": { - "Region": "us-west-2", + "Region": "us-east-1", + "Bucket": "test-accessp-h0000075431d83bebde8xz5w8ijx1qzlbp3i3kuse10--op-s3", "UseFIPS": false, "UseDualStack": false, - "Accelerate": false, - "UseArnRegion": false, - "Bucket": "arn:aws:s3-object-lambda:us-west-2:123456789012:bucket_name:mybucket" + "Accelerate": false } }, { - "documentation": "object lambda with invalid arn - missing region", + "documentation": "S3 Outposts bucketAlias Special character in Outpost Arn", "expect": { - "error": "Invalid ARN: bucket ARN is missing a region" + "error": "Invalid ARN: The outpost Id must only contain a-z, A-Z, 0-9 and `-`." }, "params": { - "Region": "us-west-2", + "Region": "us-east-1", + "Bucket": "test-accessp-o00000754%1d83bebde8xz5w8ijx1qzlbp3i3kuse10--op-s3", "UseFIPS": false, "UseDualStack": false, - "Accelerate": false, - "UseArnRegion": false, - "Bucket": "arn:aws:s3-object-lambda::123456789012:accesspoint/mybanner" + "Accelerate": false } }, { - "documentation": "object lambda with invalid arn - missing account-id", + "documentation": "S3 Outposts bucketAlias - No endpoint set for beta", "expect": { - "error": "Invalid ARN: Missing account id" + "error": "Expected a endpoint to be specified but no endpoint was found" }, "params": { - "Region": "us-west-2", + "Region": "us-east-1", + "Bucket": "test-accessp-e0b1d075431d83bebde8xz5w8ijx1qzlbp3i3ebeta0--op-s3", "UseFIPS": false, "UseDualStack": false, - "Accelerate": false, - "UseArnRegion": true, - "Bucket": "arn:aws:s3-object-lambda:us-west-2::accesspoint/mybanner" + "Accelerate": false } }, { - "documentation": "object lambda with invalid arn - account id contains invalid characters", + "documentation": "S3 Snow with bucket", "expect": { - "error": "Invalid ARN: The account id may only contain a-z, A-Z, 0-9 and `-`. 
Found: `123.45678.9012`" - }, - "operationInputs": [ - { - "builtInParams": { - "AWS::Region": "us-west-2", - "AWS::S3::UseArnRegion": true + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3", + "signingRegion": "snow", + "disableDoubleEncoding": true + } + ] }, - "operationName": "GetObject", - "operationParams": { - "Bucket": "arn:aws:s3-object-lambda:us-west-2:123.45678.9012:accesspoint:mybucket", - "Key": "key" - } + "url": "http://10.0.1.12:433/bucketName" } - ], + }, "params": { - "Region": "us-west-2", + "Region": "snow", + "Bucket": "bucketName", + "Endpoint": "http://10.0.1.12:433", "UseFIPS": false, "UseDualStack": false, - "Accelerate": false, - "UseArnRegion": true, - "Bucket": "arn:aws:s3-object-lambda:us-west-2:123.45678.9012:accesspoint:mybucket" + "Accelerate": false } }, { - "documentation": "object lambda with invalid arn - missing access point name", + "documentation": "S3 Snow without bucket", "expect": { - "error": "Invalid ARN: Expected a resource of the format `accesspoint:` but no name was provided" + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3", + "signingRegion": "snow", + "disableDoubleEncoding": true + } + ] + }, + "url": "https://10.0.1.12:433" + } }, "params": { - "Region": "us-west-2", + "Region": "snow", + "Endpoint": "https://10.0.1.12:433", "UseFIPS": false, "UseDualStack": false, - "Accelerate": false, - "UseArnRegion": true, - "Bucket": "arn:aws:s3-object-lambda:us-west-2:123456789012:accesspoint" + "Accelerate": false } }, { - "documentation": "object lambda with invalid arn - access point name contains invalid character: *", + "documentation": "S3 Snow no port", "expect": { - "error": "Invalid ARN: The access point name may only contain a-z, A-Z, 0-9 and `-`. Found: `*`" + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3", + "signingRegion": "snow", + "disableDoubleEncoding": true + } + ] + }, + "url": "http://10.0.1.12/bucketName" + } }, "params": { - "Region": "us-west-2", + "Region": "snow", + "Bucket": "bucketName", + "Endpoint": "http://10.0.1.12", "UseFIPS": false, "UseDualStack": false, - "Accelerate": false, - "UseArnRegion": true, - "Bucket": "arn:aws:s3-object-lambda:us-west-2:123456789012:accesspoint:*" + "Accelerate": false } }, { - "documentation": "object lambda with invalid arn - access point name contains invalid character: .", + "documentation": "S3 Snow dns endpoint", "expect": { - "error": "Invalid ARN: The access point name may only contain a-z, A-Z, 0-9 and `-`. Found: `my.bucket`" + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3", + "signingRegion": "snow", + "disableDoubleEncoding": true + } + ] + }, + "url": "https://amazonaws.com/bucketName" + } }, "params": { - "Region": "us-west-2", + "Region": "snow", + "Bucket": "bucketName", + "Endpoint": "https://amazonaws.com", "UseFIPS": false, "UseDualStack": false, - "Accelerate": false, - "UseArnRegion": true, - "Bucket": "arn:aws:s3-object-lambda:us-west-2:123456789012:accesspoint:my.bucket" + "Accelerate": false } }, { - "documentation": "object lambda with invalid arn - access point name contains sub resources", + "documentation": "Data Plane with short AZ", "expect": { - "error": "Invalid ARN: The ARN may only contain a single resource component after `accesspoint`." 
+ "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4-s3express", + "signingName": "s3express", + "signingRegion": "us-east-1", + "disableDoubleEncoding": true + } + ], + "backend": "S3Express" + }, + "url": "https://mybucket--use1-az1--x-s3.s3express-use1-az1.us-east-1.amazonaws.com" + } }, + "operationInputs": [ + { + "builtInParams": { + "AWS::Region": "us-east-1" + }, + "operationName": "GetObject", + "operationParams": { + "Bucket": "mybucket--use1-az1--x-s3", + "Key": "key" + } + } + ], "params": { - "Region": "us-west-2", + "Region": "us-east-1", + "Bucket": "mybucket--use1-az1--x-s3", "UseFIPS": false, "UseDualStack": false, "Accelerate": false, - "UseArnRegion": true, - "Bucket": "arn:aws:s3-object-lambda:us-west-2:123456789012:accesspoint:mybucket:object:foo" + "UseS3ExpressControlEndpoint": false } }, { - "documentation": "object lambda with custom endpoint", + "documentation": "Data Plane with short AZ fips", "expect": { "endpoint": { "properties": { "authSchemes": [ { - "name": "sigv4", - "signingName": "s3-object-lambda", - "signingRegion": "us-west-2", + "name": "sigv4-s3express", + "signingName": "s3express", + "signingRegion": "us-east-1", "disableDoubleEncoding": true } - ] + ], + "backend": "S3Express" }, - "url": "https://mybanner-123456789012.my-endpoint.com" + "url": "https://mybucket--use1-az1--x-s3.s3express-fips-use1-az1.us-east-1.amazonaws.com" } }, "operationInputs": [ { "builtInParams": { - "AWS::Region": "us-west-2", - "SDK::Endpoint": "https://my-endpoint.com", - "AWS::S3::UseArnRegion": false + "AWS::Region": "us-east-1", + "AWS::UseFIPS": true }, "operationName": "GetObject", "operationParams": { - "Bucket": "arn:aws:s3-object-lambda:us-west-2:123456789012:accesspoint/mybanner", + "Bucket": "mybucket--use1-az1--x-s3", "Key": "key" } } ], "params": { - "Region": "us-west-2", - "UseFIPS": false, + "Region": "us-east-1", + "Bucket": "mybucket--use1-az1--x-s3", + "UseFIPS": true, "UseDualStack": false, "Accelerate": false, - "UseArnRegion": false, - "Bucket": "arn:aws:s3-object-lambda:us-west-2:123456789012:accesspoint/mybanner", - "Endpoint": "https://my-endpoint.com" + "UseS3ExpressControlEndpoint": false } }, { - "documentation": "object lambda arn with region mismatch and UseArnRegion=false", + "documentation": "Data Plane with long AZ", "expect": { - "error": "Invalid configuration: region from ARN `us-east-1` does not match client region `us-west-2` and UseArnRegion is `false`" + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4-s3express", + "signingName": "s3express", + "signingRegion": "ap-northeast-1", + "disableDoubleEncoding": true + } + ], + "backend": "S3Express" + }, + "url": "https://mybucket--apne1-az1--x-s3.s3express-apne1-az1.ap-northeast-1.amazonaws.com" + } }, "operationInputs": [ { "builtInParams": { - "AWS::Region": "us-west-2", - "AWS::S3::UseArnRegion": false + "AWS::Region": "ap-northeast-1" }, "operationName": "GetObject", "operationParams": { - "Bucket": "arn:aws:s3-object-lambda:us-east-1:123456789012:accesspoint/mybanner", + "Bucket": "mybucket--apne1-az1--x-s3", "Key": "key" } } ], "params": { - "Accelerate": false, - "Bucket": "arn:aws:s3-object-lambda:us-east-1:123456789012:accesspoint/mybanner", - "ForcePathStyle": false, - "UseArnRegion": false, - "Region": "us-west-2", + "Region": "ap-northeast-1", + "Bucket": "mybucket--apne1-az1--x-s3", + "UseFIPS": false, "UseDualStack": false, - "UseFIPS": false + "Accelerate": false, + "UseS3ExpressControlEndpoint": false } }, { - "documentation": 
"WriteGetObjectResponse @ us-west-2", + "documentation": "Data Plane with long AZ fips", "expect": { "endpoint": { "properties": { "authSchemes": [ { - "name": "sigv4", - "signingName": "s3-object-lambda", - "signingRegion": "us-west-2", + "name": "sigv4-s3express", + "signingName": "s3express", + "signingRegion": "ap-northeast-1", "disableDoubleEncoding": true } - ] + ], + "backend": "S3Express" }, - "url": "https://s3-object-lambda.us-west-2.amazonaws.com" + "url": "https://mybucket--apne1-az1--x-s3.s3express-fips-apne1-az1.ap-northeast-1.amazonaws.com" } }, "operationInputs": [ { "builtInParams": { - "AWS::Region": "us-west-2" + "AWS::Region": "ap-northeast-1", + "AWS::UseFIPS": true }, - "operationName": "WriteGetObjectResponse", + "operationName": "GetObject", "operationParams": { - "RequestRoute": "RequestRoute", - "RequestToken": "RequestToken" + "Bucket": "mybucket--apne1-az1--x-s3", + "Key": "key" } } ], "params": { - "Accelerate": false, - "UseObjectLambdaEndpoint": true, - "Region": "us-west-2", + "Region": "ap-northeast-1", + "Bucket": "mybucket--apne1-az1--x-s3", + "UseFIPS": true, "UseDualStack": false, - "UseFIPS": false + "Accelerate": false, + "UseS3ExpressControlEndpoint": false } }, { - "documentation": "WriteGetObjectResponse with custom endpoint", + "documentation": "Control plane with short AZ bucket", "expect": { "endpoint": { "properties": { "authSchemes": [ { "name": "sigv4", - "signingName": "s3-object-lambda", - "signingRegion": "us-west-2", + "signingName": "s3express", + "signingRegion": "us-east-1", "disableDoubleEncoding": true } - ] + ], + "backend": "S3Express" }, - "url": "https://my-endpoint.com" + "url": "https://s3express-control.us-east-1.amazonaws.com/mybucket--use1-az1--x-s3" } }, "operationInputs": [ { "builtInParams": { - "AWS::Region": "us-west-2", - "SDK::Endpoint": "https://my-endpoint.com" + "AWS::Region": "us-east-1" }, - "operationName": "WriteGetObjectResponse", + "operationName": "CreateBucket", "operationParams": { - "RequestRoute": "RequestRoute", - "RequestToken": "RequestToken" + "Bucket": "mybucket--use1-az1--x-s3" } } ], "params": { - "Accelerate": false, - "UseObjectLambdaEndpoint": true, - "Endpoint": "https://my-endpoint.com", - "Region": "us-west-2", + "Region": "us-east-1", + "Bucket": "mybucket--use1-az1--x-s3", + "UseFIPS": false, "UseDualStack": false, - "UseFIPS": false + "Accelerate": false, + "UseS3ExpressControlEndpoint": true, + "DisableS3ExpressSessionAuth": false } }, { - "documentation": "WriteGetObjectResponse @ us-east-1", + "documentation": "Control plane with short AZ bucket and fips", "expect": { "endpoint": { "properties": { "authSchemes": [ { "name": "sigv4", - "signingName": "s3-object-lambda", + "signingName": "s3express", "signingRegion": "us-east-1", "disableDoubleEncoding": true } - ] + ], + "backend": "S3Express" }, - "url": "https://s3-object-lambda.us-east-1.amazonaws.com" + "url": "https://s3express-control-fips.us-east-1.amazonaws.com/mybucket--use1-az1--x-s3" } }, "operationInputs": [ { "builtInParams": { - "AWS::Region": "us-east-1" + "AWS::Region": "us-east-1", + "AWS::UseFIPS": true }, - "operationName": "WriteGetObjectResponse", + "operationName": "CreateBucket", "operationParams": { - "RequestRoute": "RequestRoute", - "RequestToken": "RequestToken" + "Bucket": "mybucket--use1-az1--x-s3" } } ], "params": { - "Accelerate": false, - "UseObjectLambdaEndpoint": true, "Region": "us-east-1", + "Bucket": "mybucket--use1-az1--x-s3", + "UseFIPS": true, "UseDualStack": false, - "UseFIPS": false + 
"Accelerate": false, + "UseS3ExpressControlEndpoint": true, + "DisableS3ExpressSessionAuth": false } }, { - "documentation": "WriteGetObjectResponse with fips", + "documentation": "Control plane without bucket", "expect": { "endpoint": { "properties": { "authSchemes": [ { "name": "sigv4", - "signingName": "s3-object-lambda", + "signingName": "s3express", "signingRegion": "us-east-1", "disableDoubleEncoding": true } - ] + ], + "backend": "S3Express" }, - "url": "https://s3-object-lambda-fips.us-east-1.amazonaws.com" + "url": "https://s3express-control.us-east-1.amazonaws.com" } }, "operationInputs": [ { "builtInParams": { - "AWS::Region": "us-east-1", - "AWS::UseFIPS": true + "AWS::Region": "us-east-1" }, - "operationName": "WriteGetObjectResponse", - "operationParams": { - "RequestRoute": "RequestRoute", - "RequestToken": "RequestToken" - } + "operationName": "ListDirectoryBuckets" } ], "params": { - "Accelerate": false, - "UseObjectLambdaEndpoint": true, "Region": "us-east-1", + "UseFIPS": false, "UseDualStack": false, - "UseFIPS": true + "Accelerate": false, + "UseS3ExpressControlEndpoint": true, + "DisableS3ExpressSessionAuth": false } }, { - "documentation": "WriteGetObjectResponse with dualstack", + "documentation": "Control plane without bucket and fips", "expect": { - "error": "S3 Object Lambda does not support Dual-stack" + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3express", + "signingRegion": "us-east-1", + "disableDoubleEncoding": true + } + ], + "backend": "S3Express" + }, + "url": "https://s3express-control-fips.us-east-1.amazonaws.com" + } }, "operationInputs": [ { "builtInParams": { "AWS::Region": "us-east-1", - "AWS::UseDualStack": true + "AWS::UseFIPS": true }, - "operationName": "WriteGetObjectResponse", - "operationParams": { - "RequestRoute": "RequestRoute", - "RequestToken": "RequestToken" - } + "operationName": "ListDirectoryBuckets" } ], "params": { - "Accelerate": false, - "UseObjectLambdaEndpoint": true, - "Region": "us-east-1", - "UseDualStack": true, - "UseFIPS": false - } - }, - { - "documentation": "WriteGetObjectResponse with accelerate", - "expect": { - "error": "S3 Object Lambda does not support S3 Accelerate" - }, - "params": { - "Accelerate": true, - "UseObjectLambdaEndpoint": true, "Region": "us-east-1", + "UseFIPS": true, "UseDualStack": false, - "UseFIPS": false + "Accelerate": false, + "UseS3ExpressControlEndpoint": true, + "DisableS3ExpressSessionAuth": false } }, { - "documentation": "WriteGetObjectResponse with fips in CN", + "documentation": "Data Plane sigv4 auth with short AZ", "expect": { - "error": "Partition does not support FIPS" + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3express", + "signingRegion": "us-west-2", + "disableDoubleEncoding": true + } + ], + "backend": "S3Express" + }, + "url": "https://mybucket--usw2-az1--x-s3.s3express-usw2-az1.us-west-2.amazonaws.com" + } }, "params": { - "Accelerate": false, - "Region": "cn-north-1", - "UseObjectLambdaEndpoint": true, + "Region": "us-west-2", + "Bucket": "mybucket--usw2-az1--x-s3", + "UseFIPS": false, "UseDualStack": false, - "UseFIPS": true + "Accelerate": false, + "DisableS3ExpressSessionAuth": true } }, { - "documentation": "WriteGetObjectResponse with invalid partition", + "documentation": "Data Plane sigv4 auth with short AZ fips", "expect": { - "error": "Invalid region: region was not a valid DNS name." 
+ "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3express", + "signingRegion": "us-west-2", + "disableDoubleEncoding": true + } + ], + "backend": "S3Express" + }, + "url": "https://mybucket--usw2-az1--x-s3.s3express-fips-usw2-az1.us-west-2.amazonaws.com" + } }, "params": { - "Accelerate": false, - "UseObjectLambdaEndpoint": true, - "Region": "not a valid DNS name", + "Region": "us-west-2", + "Bucket": "mybucket--usw2-az1--x-s3", + "UseFIPS": true, "UseDualStack": false, - "UseFIPS": false + "Accelerate": false, + "DisableS3ExpressSessionAuth": true } }, { - "documentation": "WriteGetObjectResponse with an unknown partition", + "documentation": "Data Plane sigv4 auth with long AZ", "expect": { "endpoint": { "properties": { "authSchemes": [ { "name": "sigv4", - "signingName": "s3-object-lambda", - "disableDoubleEncoding": true, - "signingRegion": "us-east.special" + "signingName": "s3express", + "signingRegion": "ap-northeast-1", + "disableDoubleEncoding": true } - ] + ], + "backend": "S3Express" }, - "url": "https://s3-object-lambda.us-east.special.amazonaws.com" + "url": "https://mybucket--apne1-az1--x-s3.s3express-apne1-az1.ap-northeast-1.amazonaws.com" } }, "params": { - "Accelerate": false, - "UseObjectLambdaEndpoint": true, - "Region": "us-east.special", + "Region": "ap-northeast-1", + "Bucket": "mybucket--apne1-az1--x-s3", + "UseFIPS": false, "UseDualStack": false, - "UseFIPS": false + "Accelerate": false, + "UseS3ExpressControlEndpoint": false, + "DisableS3ExpressSessionAuth": true } }, { - "documentation": "S3 Outposts bucketAlias Real Outpost Prod us-west-1", + "documentation": "Data Plane sigv4 auth with long AZ fips", "expect": { "endpoint": { "properties": { "authSchemes": [ { "name": "sigv4", - "signingName": "s3-outposts", - "signingRegion": "us-west-1", + "signingName": "s3express", + "signingRegion": "ap-northeast-1", "disableDoubleEncoding": true } - ] + ], + "backend": "S3Express" }, - "url": "https://test-accessp-o0b1d075431d83bebde8xz5w8ijx1qzlbp3i3kuse10--op-s3.op-0b1d075431d83bebd.s3-outposts.us-west-1.amazonaws.com" + "url": "https://mybucket--apne1-az1--x-s3.s3express-fips-apne1-az1.ap-northeast-1.amazonaws.com" } }, "params": { - "Region": "us-west-1", - "Bucket": "test-accessp-o0b1d075431d83bebde8xz5w8ijx1qzlbp3i3kuse10--op-s3", - "UseFIPS": false, + "Region": "ap-northeast-1", + "Bucket": "mybucket--apne1-az1--x-s3", + "UseFIPS": true, "UseDualStack": false, - "Accelerate": false + "Accelerate": false, + "UseS3ExpressControlEndpoint": false, + "DisableS3ExpressSessionAuth": true } }, { - "documentation": "S3 Outposts bucketAlias Real Outpost Prod ap-east-1", + "documentation": "Control Plane host override", "expect": { "endpoint": { "properties": { "authSchemes": [ { "name": "sigv4", - "signingName": "s3-outposts", - "signingRegion": "ap-east-1", + "signingName": "s3express", + "signingRegion": "us-west-2", "disableDoubleEncoding": true } - ] + ], + "backend": "S3Express" }, - "url": "https://test-accessp-o0b1d075431d83bebde8xz5w8ijx1qzlbp3i3kuse10--op-s3.op-0b1d075431d83bebd.s3-outposts.ap-east-1.amazonaws.com" + "url": "https://mybucket--usw2-az1--x-s3.custom.com" } }, "params": { - "Region": "ap-east-1", - "Bucket": "test-accessp-o0b1d075431d83bebde8xz5w8ijx1qzlbp3i3kuse10--op-s3", + "Region": "us-west-2", + "Bucket": "mybucket--usw2-az1--x-s3", "UseFIPS": false, "UseDualStack": false, - "Accelerate": false + "Accelerate": false, + "UseS3ExpressControlEndpoint": true, + "DisableS3ExpressSessionAuth": true, + 
"Endpoint": "https://custom.com" } }, { - "documentation": "S3 Outposts bucketAlias Ec2 Outpost Prod us-east-1", + "documentation": "Control Plane host override no bucket", "expect": { "endpoint": { "properties": { "authSchemes": [ { "name": "sigv4", - "signingName": "s3-outposts", - "signingRegion": "us-east-1", + "signingName": "s3express", + "signingRegion": "us-west-2", "disableDoubleEncoding": true } - ] + ], + "backend": "S3Express" }, - "url": "https://test-accessp-e0000075431d83bebde8xz5w8ijx1qzlbp3i3kuse10--op-s3.ec2.s3-outposts.us-east-1.amazonaws.com" + "url": "https://custom.com" } }, "params": { - "Region": "us-east-1", - "Bucket": "test-accessp-e0000075431d83bebde8xz5w8ijx1qzlbp3i3kuse10--op-s3", + "Region": "us-west-2", "UseFIPS": false, "UseDualStack": false, - "Accelerate": false + "Accelerate": false, + "UseS3ExpressControlEndpoint": true, + "DisableS3ExpressSessionAuth": true, + "Endpoint": "https://custom.com" } }, { - "documentation": "S3 Outposts bucketAlias Ec2 Outpost Prod me-south-1", + "documentation": "Data plane host override non virtual session auth", "expect": { "endpoint": { "properties": { "authSchemes": [ { - "name": "sigv4", - "signingName": "s3-outposts", - "signingRegion": "me-south-1", + "name": "sigv4-s3express", + "signingName": "s3express", + "signingRegion": "us-west-2", "disableDoubleEncoding": true } - ] + ], + "backend": "S3Express" }, - "url": "https://test-accessp-e0000075431d83bebde8xz5w8ijx1qzlbp3i3kuse10--op-s3.ec2.s3-outposts.me-south-1.amazonaws.com" + "url": "https://10.0.0.1/mybucket--usw2-az1--x-s3" } }, + "operationInputs": [ + { + "builtInParams": { + "AWS::Region": "us-west-2", + "SDK::Endpoint": "https://10.0.0.1" + }, + "operationName": "GetObject", + "operationParams": { + "Bucket": "mybucket--usw2-az1--x-s3", + "Key": "key" + } + } + ], "params": { - "Region": "me-south-1", - "Bucket": "test-accessp-e0000075431d83bebde8xz5w8ijx1qzlbp3i3kuse10--op-s3", + "Region": "us-west-2", + "Bucket": "mybucket--usw2-az1--x-s3", "UseFIPS": false, "UseDualStack": false, - "Accelerate": false + "Accelerate": false, + "Endpoint": "https://10.0.0.1" } }, { - "documentation": "S3 Outposts bucketAlias Real Outpost Beta", + "documentation": "Control Plane host override ip", "expect": { "endpoint": { "properties": { "authSchemes": [ { "name": "sigv4", - "signingName": "s3-outposts", - "signingRegion": "us-east-1", + "signingName": "s3express", + "signingRegion": "us-west-2", "disableDoubleEncoding": true } - ] + ], + "backend": "S3Express" }, - "url": "https://test-accessp-o0b1d075431d83bebde8xz5w8ijx1qzlbp3i3kbeta0--op-s3.op-0b1d075431d83bebd.example.amazonaws.com" + "url": "https://10.0.0.1/mybucket--usw2-az1--x-s3" } }, "params": { - "Region": "us-east-1", - "Bucket": "test-accessp-o0b1d075431d83bebde8xz5w8ijx1qzlbp3i3kbeta0--op-s3", - "Endpoint": "https://example.amazonaws.com", + "Region": "us-west-2", + "Bucket": "mybucket--usw2-az1--x-s3", "UseFIPS": false, "UseDualStack": false, - "Accelerate": false + "Accelerate": false, + "UseS3ExpressControlEndpoint": true, + "DisableS3ExpressSessionAuth": true, + "Endpoint": "https://10.0.0.1" } }, { - "documentation": "S3 Outposts bucketAlias Ec2 Outpost Beta", + "documentation": "Data plane host override", "expect": { "endpoint": { "properties": { "authSchemes": [ { - "name": "sigv4", - "signingName": "s3-outposts", - "signingRegion": "us-east-1", + "name": "sigv4-s3express", + "signingName": "s3express", + "signingRegion": "us-west-2", "disableDoubleEncoding": true } - ] + ], + "backend": "S3Express" + 
}, + "url": "https://mybucket--usw2-az1--x-s3.custom.com" + } + }, + "operationInputs": [ + { + "builtInParams": { + "AWS::Region": "us-west-2", + "SDK::Endpoint": "https://custom.com" }, - "url": "https://161743052723-e00000136899934034jeahy1t8gpzpbwjj8kb7beta0--op-s3.ec2.example.amazonaws.com" + "operationName": "GetObject", + "operationParams": { + "Bucket": "mybucket--usw2-az1--x-s3", + "Key": "key" + } } - }, + ], "params": { - "Region": "us-east-1", - "Bucket": "161743052723-e00000136899934034jeahy1t8gpzpbwjj8kb7beta0--op-s3", - "Endpoint": "https://example.amazonaws.com", + "Region": "us-west-2", + "Bucket": "mybucket--usw2-az1--x-s3", "UseFIPS": false, "UseDualStack": false, - "Accelerate": false + "Accelerate": false, + "Endpoint": "https://custom.com" } }, { - "documentation": "S3 Outposts bucketAlias - No endpoint set for beta", + "documentation": "bad format error", "expect": { - "error": "Expected a endpoint to be specified but no endpoint was found" + "error": "Unrecognized S3Express bucket name format." }, + "operationInputs": [ + { + "builtInParams": { + "AWS::Region": "us-east-1" + }, + "operationName": "GetObject", + "operationParams": { + "Bucket": "mybucket--usaz1--x-s3", + "Key": "key" + } + } + ], "params": { "Region": "us-east-1", - "Bucket": "test-accessp-o0b1d075431d83bebde8xz5w8ijx1qzlbp3i3kbeta0--op-s3", + "Bucket": "mybucket--usaz1--x-s3", "UseFIPS": false, "UseDualStack": false, - "Accelerate": false + "Accelerate": false, + "UseS3ExpressControlEndpoint": false } }, { - "documentation": "S3 Outposts bucketAlias Invalid hardware type", + "documentation": "bad format error no session auth", "expect": { - "error": "Unrecognized hardware type: \"Expected hardware type o or e but got h\"" + "error": "Unrecognized S3Express bucket name format." }, + "operationInputs": [ + { + "builtInParams": { + "AWS::Region": "us-east-1" + }, + "operationName": "GetObject", + "operationParams": { + "Bucket": "mybucket--usaz1--x-s3", + "Key": "key" + } + } + ], "params": { "Region": "us-east-1", - "Bucket": "test-accessp-h0000075431d83bebde8xz5w8ijx1qzlbp3i3kuse10--op-s3", + "Bucket": "mybucket--usaz1--x-s3", "UseFIPS": false, "UseDualStack": false, - "Accelerate": false + "Accelerate": false, + "UseS3ExpressControlEndpoint": false, + "DisableS3ExpressSessionAuth": true } }, { - "documentation": "S3 Outposts bucketAlias Special character in Outpost Arn", + "documentation": "dual-stack error", "expect": { - "error": "Invalid ARN: The outpost Id must only contain a-z, A-Z, 0-9 and `-`." + "error": "S3Express does not support Dual-stack." }, + "operationInputs": [ + { + "builtInParams": { + "AWS::Region": "us-east-1", + "AWS::UseDualStack": true + }, + "operationName": "GetObject", + "operationParams": { + "Bucket": "mybucket--use1-az1--x-s3", + "Key": "key" + } + } + ], "params": { "Region": "us-east-1", - "Bucket": "test-accessp-o00000754%1d83bebde8xz5w8ijx1qzlbp3i3kuse10--op-s3", + "Bucket": "mybucket--use1-az1--x-s3", "UseFIPS": false, - "UseDualStack": false, - "Accelerate": false + "UseDualStack": true, + "Accelerate": false, + "UseS3ExpressControlEndpoint": false } }, { - "documentation": "S3 Outposts bucketAlias - No endpoint set for beta", + "documentation": "accelerate error", "expect": { - "error": "Expected a endpoint to be specified but no endpoint was found" + "error": "S3Express does not support S3 Accelerate." 
}, - "params": { - "Region": "us-east-1", - "Bucket": "test-accessp-e0b1d075431d83bebde8xz5w8ijx1qzlbp3i3ebeta0--op-s3", - "UseFIPS": false, - "UseDualStack": false, - "Accelerate": false - } - }, - { - "documentation": "S3 Snow with bucket", - "expect": { - "endpoint": { - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "s3", - "signingRegion": "snow", - "disableDoubleEncoding": true - } - ] + "operationInputs": [ + { + "builtInParams": { + "AWS::Region": "us-east-1", + "AWS::S3::Accelerate": true }, - "url": "http://10.0.1.12:433/bucketName" + "operationName": "GetObject", + "operationParams": { + "Bucket": "mybucket--use1-az1--x-s3", + "Key": "key" + } } - }, + ], "params": { - "Region": "snow", - "Bucket": "bucketName", - "Endpoint": "http://10.0.1.12:433", + "Region": "us-east-1", + "Bucket": "mybucket--use1-az1--x-s3", "UseFIPS": false, "UseDualStack": false, - "Accelerate": false + "Accelerate": true, + "UseS3ExpressControlEndpoint": false } }, { - "documentation": "S3 Snow without bucket", + "documentation": "Data plane bucket format error", "expect": { - "endpoint": { - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "s3", - "signingRegion": "snow", - "disableDoubleEncoding": true - } - ] + "error": "S3Express bucket name is not a valid virtual hostable name." + }, + "operationInputs": [ + { + "builtInParams": { + "AWS::Region": "us-east-1" }, - "url": "https://10.0.1.12:433" + "operationName": "GetObject", + "operationParams": { + "Bucket": "my.bucket--use1-az1--x-s3", + "Key": "key" + } } - }, + ], "params": { - "Region": "snow", - "Endpoint": "https://10.0.1.12:433", + "Region": "us-east-1", + "Bucket": "my.bucket--use1-az1--x-s3", "UseFIPS": false, "UseDualStack": false, - "Accelerate": false + "Accelerate": false, + "UseS3ExpressControlEndpoint": false } }, { - "documentation": "S3 Snow no port", + "documentation": "host override data plane bucket error session auth", "expect": { - "endpoint": { - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "s3", - "signingRegion": "snow", - "disableDoubleEncoding": true - } - ] + "error": "S3Express bucket name is not a valid virtual hostable name." + }, + "operationInputs": [ + { + "builtInParams": { + "AWS::Region": "us-west-2", + "SDK::Endpoint": "https://custom.com" }, - "url": "http://10.0.1.12/bucketName" + "operationName": "GetObject", + "operationParams": { + "Bucket": "my.bucket--usw2-az1--x-s3", + "Key": "key" + } } - }, + ], "params": { - "Region": "snow", - "Bucket": "bucketName", - "Endpoint": "http://10.0.1.12", + "Region": "us-west-2", + "Bucket": "my.bucket--usw2-az1--x-s3", "UseFIPS": false, "UseDualStack": false, - "Accelerate": false + "Accelerate": false, + "Endpoint": "https://custom.com" } }, { - "documentation": "S3 Snow dns endpoint", + "documentation": "host override data plane bucket error", "expect": { - "endpoint": { - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "s3", - "signingRegion": "snow", - "disableDoubleEncoding": true - } - ] - }, - "url": "https://amazonaws.com/bucketName" - } + "error": "S3Express bucket name is not a valid virtual hostable name." 
}, "params": { - "Region": "snow", - "Bucket": "bucketName", - "Endpoint": "https://amazonaws.com", + "Region": "us-west-2", + "Bucket": "my.bucket--usw2-az1--x-s3", "UseFIPS": false, "UseDualStack": false, - "Accelerate": false + "Accelerate": false, + "Endpoint": "https://custom.com", + "DisableS3ExpressSessionAuth": true } } ], @@ -15765,7 +17488,7 @@ } }, "traits": { - "smithy.api#documentation": "

In terms of implementation, a Bucket is a resource. An Amazon S3 bucket name is globally\n unique, and the namespace is shared by all Amazon Web Services accounts.

" + "smithy.api#documentation": "

In terms of implementation, a Bucket is a resource.

" } }, "com.amazonaws.s3#BucketAccelerateStatus": { @@ -15790,7 +17513,8 @@ "members": {}, "traits": { "smithy.api#documentation": "

The requested bucket name is not available. The bucket namespace is shared by all users\n of the system. Select a different name and try again.

", - "smithy.api#error": "client" + "smithy.api#error": "client", + "smithy.api#httpError": 409 } }, "com.amazonaws.s3#BucketAlreadyOwnedByYou": { @@ -15798,7 +17522,8 @@ "members": {}, "traits": { "smithy.api#documentation": "

The bucket you tried to create already exists, and you own it. Amazon S3 returns this error\n in all Amazon Web Services Regions except in the North Virginia Region. For legacy compatibility, if you\n re-create an existing bucket that you already own in the North Virginia Region, Amazon S3\n returns 200 OK and resets the bucket access control lists (ACLs).

", - "smithy.api#error": "client" + "smithy.api#error": "client", + "smithy.api#httpError": 409 } }, "com.amazonaws.s3#BucketCannedACL": { @@ -15830,6 +17555,26 @@ } } }, + "com.amazonaws.s3#BucketInfo": { + "type": "structure", + "members": { + "DataRedundancy": { + "target": "com.amazonaws.s3#DataRedundancy", + "traits": { + "smithy.api#documentation": "

The number of Availability Zones that are used for redundancy for the bucket.

" + } + }, + "Type": { + "target": "com.amazonaws.s3#BucketType", + "traits": { + "smithy.api#documentation": "

The type of bucket.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Specifies the information about the bucket that will be created. For more information about directory buckets, see Directory buckets in the Amazon S3 User Guide.

This functionality is only supported by directory buckets.
" + } + }, "com.amazonaws.s3#BucketKeyEnabled": { "type": "boolean" }, @@ -15889,6 +17634,12 @@ "smithy.api#enumValue": "ap-south-1" } }, + "ap_south_2": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ap-south-2" + } + }, "ap_southeast_1": { "target": "smithy.api#Unit", "traits": { @@ -15949,6 +17700,12 @@ "smithy.api#enumValue": "eu-south-1" } }, + "eu_south_2": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "eu-south-2" + } + }, "eu_west_1": { "target": "smithy.api#Unit", "traits": { @@ -16008,21 +17765,12 @@ "traits": { "smithy.api#enumValue": "us-west-2" } - }, - "ap_south_2": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "ap-south-2" - } - }, - "eu_south_2": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "eu-south-2" - } } } }, + "com.amazonaws.s3#BucketLocationName": { + "type": "string" + }, "com.amazonaws.s3#BucketLoggingStatus": { "type": "structure", "members": { @@ -16060,6 +17808,17 @@ "com.amazonaws.s3#BucketName": { "type": "string" }, + "com.amazonaws.s3#BucketType": { + "type": "enum", + "members": { + "Directory": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Directory" + } + } + } + }, "com.amazonaws.s3#BucketVersioningStatus": { "type": "enum", "members": { @@ -16272,25 +18031,25 @@ "ChecksumCRC32": { "target": "com.amazonaws.s3#ChecksumCRC32", "traits": { - "smithy.api#documentation": "

The base64-encoded, 32-bit CRC32 checksum of the object. This will only be present if it was uploaded\n with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

" + "smithy.api#documentation": "

The base64-encoded, 32-bit CRC32 checksum of the object. This will only be present if it was uploaded\n with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

" } }, "ChecksumCRC32C": { "target": "com.amazonaws.s3#ChecksumCRC32C", "traits": { - "smithy.api#documentation": "

The base64-encoded, 32-bit CRC32C checksum of the object. This will only be present if it was uploaded\n with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

" + "smithy.api#documentation": "

The base64-encoded, 32-bit CRC32C checksum of the object. This will only be present if it was uploaded\n with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

" } }, "ChecksumSHA1": { "target": "com.amazonaws.s3#ChecksumSHA1", "traits": { - "smithy.api#documentation": "

The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded\n with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

" + "smithy.api#documentation": "

The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded\n with the object. When you use the API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

" } }, "ChecksumSHA256": { "target": "com.amazonaws.s3#ChecksumSHA256", "traits": { - "smithy.api#documentation": "

The base64-encoded, 256-bit SHA-256 digest of the object. This will only be present if it was uploaded\n with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

" + "smithy.api#documentation": "

The base64-encoded, 256-bit SHA-256 digest of the object. This will only be present if it was uploaded\n with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

" } } }, @@ -16391,7 +18150,7 @@ "target": "com.amazonaws.s3#CompleteMultipartUploadOutput" }, "traits": { - "smithy.api#documentation": "

Completes a multipart upload by assembling previously uploaded parts.

You first initiate the multipart upload and then upload all parts using the UploadPart operation. After successfully uploading all relevant parts of an upload, you call this action to complete the upload. Upon receiving this request, Amazon S3 concatenates all the parts in ascending order by part number to create a new object. In the Complete Multipart Upload request, you must provide the parts list. You must ensure that the parts list is complete. This action concatenates the parts that you provide in the list. For each part in the list, you must provide the part number and the ETag value, returned after that part was uploaded.

Processing of a Complete Multipart Upload request could take several minutes to complete. After Amazon S3 begins processing the request, it sends an HTTP response header that specifies a 200 OK response. While processing is in progress, Amazon S3 periodically sends white space characters to keep the connection from timing out. A request could fail after the initial 200 OK response has been sent. This means that a 200 OK response can contain either a success or an error. If you call the S3 API directly, make sure to design your application to parse the contents of the response and handle it appropriately. If you use Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect the embedded error and apply error handling per your configuration settings (including automatically retrying the request as appropriate). If the condition persists, the SDKs throws an exception (or, for the SDKs that don't use exceptions, they return the error).

Note that if CompleteMultipartUpload fails, applications should be prepared to retry the failed requests. For more information, see Amazon S3 Error Best Practices.

You cannot use Content-Type: application/x-www-form-urlencoded with Complete Multipart Upload requests. Also, if you do not provide a Content-Type header, CompleteMultipartUpload returns a 200 OK response.

For more information about multipart uploads, see Uploading Objects Using Multipart Upload.

For information about permissions required to use the multipart upload API, see Multipart Upload and Permissions.

CompleteMultipartUpload has the following special errors:

• Error code: EntityTooSmall
  • Description: Your proposed upload is smaller than the minimum allowed object size. Each part must be at least 5 MB in size, except the last part.
  • 400 Bad Request
• Error code: InvalidPart
  • Description: One or more of the specified parts could not be found. The part might not have been uploaded, or the specified entity tag might not have matched the part's entity tag.
  • 400 Bad Request
• Error code: InvalidPartOrder
  • Description: The list of parts was not in ascending order. The parts list must be specified in order by part number.
  • 400 Bad Request
• Error code: NoSuchUpload
  • Description: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed.
  • 404 Not Found

The following operations are related to CompleteMultipartUpload:

\n ", + "smithy.api#documentation": "

Completes a multipart upload by assembling previously uploaded parts.

You first initiate the multipart upload and then upload all parts using the UploadPart operation or the UploadPartCopy operation. After successfully uploading all relevant parts of an upload, you call this CompleteMultipartUpload operation to complete the upload. Upon receiving this request, Amazon S3 concatenates all the parts in ascending order by part number to create a new object. In the CompleteMultipartUpload request, you must provide the parts list and ensure that the parts list is complete. The CompleteMultipartUpload API operation concatenates the parts that you provide in the list. For each part in the list, you must provide the PartNumber value and the ETag value that are returned after that part was uploaded.

The processing of a CompleteMultipartUpload request could take several minutes to finalize. After Amazon S3 begins processing the request, it sends an HTTP response header that specifies a 200 OK response. While processing is in progress, Amazon S3 periodically sends white space characters to keep the connection from timing out. A request could fail after the initial 200 OK response has been sent. This means that a 200 OK response can contain either a success or an error. The error response might be embedded in the 200 OK response. If you call this API operation directly, make sure to design your application to parse the contents of the response and handle it appropriately. If you use Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect the embedded error and apply error handling per your configuration settings (including automatically retrying the request as appropriate). If the condition persists, the SDKs throw an exception (or, for the SDKs that don't use exceptions, they return an error).

Note that if CompleteMultipartUpload fails, applications should be prepared to retry the failed requests. For more information, see Amazon S3 Error Best Practices.

You can't use Content-Type: application/x-www-form-urlencoded for the CompleteMultipartUpload requests. Also, if you don't provide a Content-Type header, CompleteMultipartUpload can still return a 200 OK response.

For more information about multipart uploads, see Uploading Objects Using Multipart Upload in the Amazon S3 User Guide.

Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.

Permissions

• General purpose bucket permissions - For information about permissions required to use the multipart upload API, see Multipart Upload and Permissions in the Amazon S3 User Guide.
• Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession.

Special errors

• Error Code: EntityTooSmall
  • Description: Your proposed upload is smaller than the minimum allowed object size. Each part must be at least 5 MB in size, except the last part.
  • HTTP Status Code: 400 Bad Request
• Error Code: InvalidPart
  • Description: One or more of the specified parts could not be found. The part might not have been uploaded, or the specified ETag might not have matched the uploaded part's ETag.

      \n
    • \n
    • \n

      HTTP Status Code: 400 Bad Request

      \n
    • \n
    \n
  • \n
  • \n

    Error Code: InvalidPartOrder\n

    \n
      \n
    • \n

      Description: The list of parts was not in ascending order. The parts list\n must be specified in order by part number.

      \n
    • \n
    • \n

      HTTP Status Code: 400 Bad Request

      \n
    • \n
    \n
  • \n
  • \n

    Error Code: NoSuchUpload\n

    \n
      \n
    • \n

      Description: The specified multipart upload does not exist. The upload ID\n might be invalid, or the multipart upload might have been aborted or\n completed.

      \n
    • \n
    • \n

      HTTP Status Code: 404 Not Found

      \n
    • \n
    \n
  • \n
\n
\n
HTTP Host header syntax
\n
\n

\n Directory buckets - The HTTP Host header syntax is \n Bucket_name.s3express-az_id.region.amazonaws.com.

\n
\n
\n

The following operations are related to CompleteMultipartUpload:

\n ", "smithy.api#http": { "method": "POST", "uri": "/{Bucket}/{Key+}?x-id=CompleteMultipartUpload", @@ -16411,7 +18170,7 @@ "Bucket": { "target": "com.amazonaws.s3#BucketName", "traits": { - "smithy.api#documentation": "

The name of the bucket that contains the newly created object. Does not return the access point\n ARN or access point alias if used.

\n

When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.

\n

When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form \n AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide.

" + "smithy.api#documentation": "

The name of the bucket that contains the newly created object. Does not return the access point\n ARN or access point alias if used.

\n \n

Access points are not supported by directory buckets.

\n
" } }, "Key": { @@ -16423,7 +18182,7 @@ "Expiration": { "target": "com.amazonaws.s3#Expiration", "traits": { - "smithy.api#documentation": "

If the object expiration is configured, this will contain the expiration date\n (expiry-date) and rule ID (rule-id). The value of\n rule-id is URL-encoded.

", + "smithy.api#documentation": "

If the object expiration is configured, this will contain the expiration date\n (expiry-date) and rule ID (rule-id). The value of\n rule-id is URL-encoded.

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-expiration" } }, @@ -16436,52 +18195,52 @@ "ChecksumCRC32": { "target": "com.amazonaws.s3#ChecksumCRC32", "traits": { - "smithy.api#documentation": "

The base64-encoded, 32-bit CRC32 checksum of the object. This will only be present if it was uploaded\n with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

" + "smithy.api#documentation": "

The base64-encoded, 32-bit CRC32 checksum of the object. This will only be present if it was uploaded\n with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.
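As a side note on the composite value mentioned above: for multipart objects the reported checksum is typically a checksum of the part-level checksums with a part-count suffix. The standalone Go sketch below illustrates that idea under this assumption; it is not taken from the SDK or the S3 service code.

    package main

    import (
        "encoding/base64"
        "encoding/binary"
        "fmt"
        "hash/crc32"
    )

    // compositeCRC32 illustrates the assumed composite-checksum shape: the CRC32 of
    // the concatenated big-endian part CRC32s, base64-encoded, with a "-<parts>" suffix.
    func compositeCRC32(parts [][]byte) string {
        var concat []byte
        for _, p := range parts {
            var buf [4]byte
            binary.BigEndian.PutUint32(buf[:], crc32.ChecksumIEEE(p))
            concat = append(concat, buf[:]...)
        }
        var out [4]byte
        binary.BigEndian.PutUint32(out[:], crc32.ChecksumIEEE(concat))
        return fmt.Sprintf("%s-%d", base64.StdEncoding.EncodeToString(out[:]), len(parts))
    }

    func main() {
        fmt.Println(compositeCRC32([][]byte{
            []byte("data for part 1"),
            []byte("data for part 2"),
        }))
    }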

" } }, "ChecksumCRC32C": { "target": "com.amazonaws.s3#ChecksumCRC32C", "traits": { - "smithy.api#documentation": "

The base64-encoded, 32-bit CRC32C checksum of the object. This will only be present if it was uploaded\n with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

" + "smithy.api#documentation": "

The base64-encoded, 32-bit CRC32C checksum of the object. This will only be present if it was uploaded\n with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

" } }, "ChecksumSHA1": { "target": "com.amazonaws.s3#ChecksumSHA1", "traits": { - "smithy.api#documentation": "

The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded\n with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

" + "smithy.api#documentation": "

The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded\n with the object. When you use the API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

" } }, "ChecksumSHA256": { "target": "com.amazonaws.s3#ChecksumSHA256", "traits": { - "smithy.api#documentation": "

The base64-encoded, 256-bit SHA-256 digest of the object. This will only be present if it was uploaded\n with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

" + "smithy.api#documentation": "

The base64-encoded, 256-bit SHA-256 digest of the object. This will only be present if it was uploaded\n with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

" } }, "ServerSideEncryption": { "target": "com.amazonaws.s3#ServerSideEncryption", "traits": { - "smithy.api#documentation": "

The server-side encryption algorithm used when storing this object in Amazon S3 (for example,\n AES256, aws:kms).

", + "smithy.api#documentation": "

The server-side encryption algorithm used when storing this object in Amazon S3 (for example,\n AES256, aws:kms).

\n \n

For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) is supported.

\n
", "smithy.api#httpHeader": "x-amz-server-side-encryption" } }, "VersionId": { "target": "com.amazonaws.s3#ObjectVersionId", "traits": { - "smithy.api#documentation": "

Version ID of the newly created object, in case the bucket has versioning turned\n on.

", + "smithy.api#documentation": "

Version ID of the newly created object, in case the bucket has versioning turned\n on.

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-version-id" } }, "SSEKMSKeyId": { "target": "com.amazonaws.s3#SSEKMSKeyId", "traits": { - "smithy.api#documentation": "

If present, specifies the ID of the Key Management Service (KMS) symmetric encryption customer managed key\n that was used for the object.

", + "smithy.api#documentation": "

If present, indicates the ID of the Key Management Service (KMS) symmetric encryption customer managed key\n that was used for the object.

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-server-side-encryption-aws-kms-key-id" } }, "BucketKeyEnabled": { "target": "com.amazonaws.s3#BucketKeyEnabled", "traits": { - "smithy.api#documentation": "

Indicates whether the multipart upload uses an S3 Bucket Key for server-side encryption\n with Key Management Service (KMS) keys (SSE-KMS).

", + "smithy.api#documentation": "

Indicates whether the multipart upload uses an S3 Bucket Key for server-side encryption\n with Key Management Service (KMS) keys (SSE-KMS).

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-server-side-encryption-bucket-key-enabled" } }, @@ -16503,7 +18262,7 @@ "Bucket": { "target": "com.amazonaws.s3#BucketName", "traits": { - "smithy.api#documentation": "

Name of the bucket to which the multipart upload was initiated.

\n

When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.

\n

When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form \n AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide.

", + "smithy.api#documentation": "

Name of the bucket to which the multipart upload was initiated.

\n

\n Directory buckets - When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format \n Bucket_name.s3express-az_id.region.amazonaws.com. Path-style requests are not supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must follow the format \n bucket_base_name--az-id--x-s3 (for example, \n DOC-EXAMPLE-BUCKET--usw2-az2--x-s3). For information about bucket naming\n restrictions, see Directory bucket naming\n rules in the Amazon S3 User Guide.

\n

\n Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.

\n \n

Access points and Object Lambda access points are not supported by directory buckets.

\n
\n

\n S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form \n AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide.
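A loose illustration of the access point usage described above (the ARN and key are made up): you can pass the access point ARN in place of the bucket name and the SDK directs the request to the access point hostname. HeadObject is used here only as a simple example operation.

    package main

    import (
        "context"
        "fmt"

        "github.com/aws/aws-sdk-go-v2/aws"
        "github.com/aws/aws-sdk-go-v2/config"
        "github.com/aws/aws-sdk-go-v2/service/s3"
    )

    func main() {
        cfg, err := config.LoadDefaultConfig(context.TODO())
        if err != nil {
            panic(err)
        }
        client := s3.NewFromConfig(cfg)

        // Hypothetical access point ARN supplied in place of the bucket name; the
        // SDK resolves it to the access point hostname.
        _, err = client.HeadObject(context.TODO(), &s3.HeadObjectInput{
            Bucket: aws.String("arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point"),
            Key:    aws.String("reports/january.pdf"),
        })
        if err != nil {
            panic(err)
        }
        fmt.Println("object is reachable through the access point")
    }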

", "smithy.api#httpLabel": {}, "smithy.api#required": {}, "smithy.rules#contextParam": { @@ -16575,28 +18334,28 @@ "ExpectedBucketOwner": { "target": "com.amazonaws.s3#AccountId", "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

", + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } }, "SSECustomerAlgorithm": { "target": "com.amazonaws.s3#SSECustomerAlgorithm", "traits": { - "smithy.api#documentation": "

The server-side encryption (SSE) algorithm used to encrypt the object. This parameter is\n required only when the object was created using a checksum algorithm or if\n your bucket policy requires the use of SSE-C. For more information, see Protecting data\n using SSE-C keys in the Amazon S3 User Guide.

", + "smithy.api#documentation": "

The server-side encryption (SSE) algorithm used to encrypt the object. This parameter is\n required only when the object was created using a checksum algorithm or if\n your bucket policy requires the use of SSE-C. For more information, see Protecting data\n using SSE-C keys in the Amazon S3 User Guide.

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-server-side-encryption-customer-algorithm" } }, "SSECustomerKey": { "target": "com.amazonaws.s3#SSECustomerKey", "traits": { - "smithy.api#documentation": "

The server-side encryption (SSE) customer managed key. This parameter is needed only when the object was created using a checksum algorithm. \n For more information, see\n Protecting data using SSE-C keys in the\n Amazon S3 User Guide.

", + "smithy.api#documentation": "

The server-side encryption (SSE) customer managed key. This parameter is needed only when the object was created using a checksum algorithm. \n For more information, see\n Protecting data using SSE-C keys in the\n Amazon S3 User Guide.

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-server-side-encryption-customer-key" } }, "SSECustomerKeyMD5": { "target": "com.amazonaws.s3#SSECustomerKeyMD5", "traits": { - "smithy.api#documentation": "

The MD5 server-side encryption (SSE) customer managed key. This parameter is needed only when the object was created using a checksum \n algorithm. For more information,\n see Protecting data using SSE-C keys in the\n Amazon S3 User Guide.

", + "smithy.api#documentation": "

The MD5 server-side encryption (SSE) customer managed key. This parameter is needed only when the object was created using a checksum \n algorithm. For more information,\n see Protecting data using SSE-C keys in the\n Amazon S3 User Guide.

\n \n

This functionality is not supported for directory buckets.
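For reference, the SSE-C header values described in these fields are derived from the raw 256-bit key roughly as follows; this is a standalone sketch, and the key here is a randomly generated placeholder.

    package main

    import (
        "crypto/md5"
        "crypto/rand"
        "encoding/base64"
        "fmt"
    )

    func main() {
        // Hypothetical 256-bit customer-provided encryption key.
        key := make([]byte, 32)
        if _, err := rand.Read(key); err != nil {
            panic(err)
        }

        // x-amz-server-side-encryption-customer-key: base64 of the raw key bytes.
        keyHeader := base64.StdEncoding.EncodeToString(key)

        // x-amz-server-side-encryption-customer-key-MD5: base64 of the MD5 digest of
        // the raw key, used for round-trip integrity verification of the key.
        digest := md5.Sum(key)
        md5Header := base64.StdEncoding.EncodeToString(digest[:])

        fmt.Println(keyHeader)
        fmt.Println(md5Header)
    }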

\n
", "smithy.api#httpHeader": "x-amz-server-side-encryption-customer-key-MD5" } } @@ -16633,31 +18392,31 @@ "ChecksumCRC32": { "target": "com.amazonaws.s3#ChecksumCRC32", "traits": { - "smithy.api#documentation": "

The base64-encoded, 32-bit CRC32 checksum of the object. This will only be present if it was uploaded\n with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

" + "smithy.api#documentation": "

The base64-encoded, 32-bit CRC32 checksum of the object. This will only be present if it was uploaded\n with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

" } }, "ChecksumCRC32C": { "target": "com.amazonaws.s3#ChecksumCRC32C", "traits": { - "smithy.api#documentation": "

The base64-encoded, 32-bit CRC32C checksum of the object. This will only be present if it was uploaded\n with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

" + "smithy.api#documentation": "

The base64-encoded, 32-bit CRC32C checksum of the object. This will only be present if it was uploaded\n with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

" } }, "ChecksumSHA1": { "target": "com.amazonaws.s3#ChecksumSHA1", "traits": { - "smithy.api#documentation": "

The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded\n with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

" + "smithy.api#documentation": "

The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded\n with the object. When you use the API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

" } }, "ChecksumSHA256": { "target": "com.amazonaws.s3#ChecksumSHA256", "traits": { - "smithy.api#documentation": "

The base64-encoded, 256-bit SHA-256 digest of the object. This will only be present if it was uploaded\n with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

" + "smithy.api#documentation": "

The base64-encoded, 256-bit SHA-256 digest of the object. This will only be present if it was uploaded\n with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

" } }, "PartNumber": { "target": "com.amazonaws.s3#PartNumber", "traits": { - "smithy.api#documentation": "

Part number that identifies the part. This is a positive integer between 1 and\n 10,000.

" + "smithy.api#documentation": "

Part number that identifies the part. This is a positive integer between 1 and\n 10,000.

\n \n
    \n
  • \n

    \n General purpose buckets - In CompleteMultipartUpload, when an additional checksum (including x-amz-checksum-crc32, x-amz-checksum-crc32c, x-amz-checksum-sha1, or \n x-amz-checksum-sha256) is applied to each part, the PartNumber must start at 1 and \n the part numbers must be consecutive. Otherwise, Amazon S3 generates an HTTP 400 Bad Request status code and an InvalidPartOrder error code.

    \n
  • \n
  • \n

    \n Directory buckets - In CompleteMultipartUpload, the PartNumber must start at 1 and \n the part numbers must be consecutive.

    \n
  • \n
\n
" } } }, @@ -16759,7 +18518,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a copy of an object that is already stored in Amazon S3.

\n \n

You can store individual objects of up to 5 TB in Amazon S3. You create a copy of your\n object up to 5 GB in size in a single atomic action using this API. However, to copy an\n object greater than 5 GB, you must use the multipart upload Upload Part - Copy\n (UploadPartCopy) API. For more information, see Copy Object Using the\n REST Multipart Upload API.

\n
\n

All copy requests must be authenticated. Additionally, you must have\n read access to the source object and write\n access to the destination bucket. For more information, see REST Authentication. Both the\n Region that you want to copy the object from and the Region that you want to copy the\n object to must be enabled for your account.

\n

A copy request might return an error when Amazon S3 receives the copy request or while Amazon S3\n is copying the files. If the error occurs before the copy action starts, you receive a\n standard Amazon S3 error. If the error occurs during the copy operation, the error response is\n embedded in the 200 OK response. This means that a 200 OK\n response can contain either a success or an error. If you call the S3 API directly, make\n sure to design your application to parse the contents of the response and handle it\n appropriately. If you use Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect the\n embedded error and apply error handling per your configuration settings (including\n automatically retrying the request as appropriate). If the condition persists, the SDKs\n throws an exception (or, for the SDKs that don't use exceptions, they return the\n error).

\n

If the copy is successful, you receive a response with information about the copied\n object.

\n \n

If the request is an HTTP 1.1 request, the response is chunk encoded. If it were not,\n it would not contain the content-length, and you would need to read the entire\n body.

\n
\n

The copy request charge is based on the storage class and Region that you specify for\n the destination object. The request can also result in a data retrieval charge for the\n source if the source storage class bills for data retrieval. For pricing information, see\n Amazon S3 pricing.

\n \n

Amazon S3 transfer acceleration does not support cross-Region copies. If you request a\n cross-Region copy using a transfer acceleration endpoint, you get a 400 Bad\n Request error. For more information, see Transfer\n Acceleration.

\n
\n
\n
Metadata
\n
\n

When copying an object, you can preserve all metadata (the default) or specify\n new metadata. However, the access control list (ACL) is not preserved and is set\n to private for the user making the request. To override the default ACL setting,\n specify a new ACL when generating a copy request. For more information, see Using\n ACLs.

\n

To specify whether you want the object metadata copied from the source object\n or replaced with metadata provided in the request, you can optionally add the\n x-amz-metadata-directive header. When you grant permissions, you\n can use the s3:x-amz-metadata-directive condition key to enforce\n certain metadata behavior when objects are uploaded. For more information, see\n Specifying Conditions in a\n Policy in the Amazon S3 User Guide. For a complete list\n of Amazon S3-specific condition keys, see Actions, Resources, and Condition\n Keys for Amazon S3.

\n \n

\n x-amz-website-redirect-location is unique to each object and\n must be specified in the request headers to copy the value.

\n
\n
\n
x-amz-copy-source-if Headers
\n
\n

To only copy an object under certain conditions, such as whether the\n Etag matches or whether the object was modified before or after a\n specified date, use the following request parameters:

\n
    \n
  • \n

    \n x-amz-copy-source-if-match\n

    \n
  • \n
  • \n

    \n x-amz-copy-source-if-none-match\n

    \n
  • \n
  • \n

    \n x-amz-copy-source-if-unmodified-since\n

    \n
  • \n
  • \n

    \n x-amz-copy-source-if-modified-since\n

    \n
  • \n
\n

If both the x-amz-copy-source-if-match and\n x-amz-copy-source-if-unmodified-since headers are present in the\n request and evaluate as follows, Amazon S3 returns 200 OK and copies the\n data:

\n
    \n
  • \n

    \n x-amz-copy-source-if-match condition evaluates to\n true

    \n
  • \n
  • \n

    \n x-amz-copy-source-if-unmodified-since condition evaluates to\n false

    \n
  • \n
\n

If both the x-amz-copy-source-if-none-match and\n x-amz-copy-source-if-modified-since headers are present in the\n request and evaluate as follows, Amazon S3 returns the 412 Precondition\n Failed response code:

\n
    \n
  • \n

    \n x-amz-copy-source-if-none-match condition evaluates to\n false

    \n
  • \n
  • \n

    \n x-amz-copy-source-if-modified-since condition evaluates to\n true

    \n
  • \n
\n \n

All headers with the x-amz- prefix, including\n x-amz-copy-source, must be signed.

\n
\n
\n
Server-side encryption
\n
\n

Amazon S3 automatically encrypts all new objects that are copied to an S3 bucket.\n When copying an object, if you don't specify encryption information in your copy\n request, the encryption setting of the target object is set to the default\n encryption configuration of the destination bucket. By default, all buckets have a\n base level of encryption configuration that uses server-side encryption with Amazon S3\n managed keys (SSE-S3). If the destination bucket has a default encryption\n configuration that uses server-side encryption with Key Management Service (KMS) keys\n (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), or\n server-side encryption with customer-provided encryption keys (SSE-C), Amazon S3 uses\n the corresponding KMS key, or a customer-provided key to encrypt the target\n object copy.

\n

When you perform a CopyObject operation, if you want to use a\n different type of encryption setting for the target object, you can use other\n appropriate encryption-related headers to encrypt the target object with a\n KMS key, an Amazon S3 managed key, or a customer-provided key. With server-side\n encryption, Amazon S3 encrypts your data as it writes your data to disks in its data\n centers and decrypts the data when you access it. If the encryption setting in\n your request is different from the default encryption configuration of the\n destination bucket, the encryption setting in your request takes precedence. If\n the source object for the copy is stored in Amazon S3 using SSE-C, you must provide the\n necessary encryption information in your request so that Amazon S3 can decrypt the\n object for copying. For more information about server-side encryption, see Using\n Server-Side Encryption.

\n

If a target object uses SSE-KMS, you can enable an S3 Bucket Key for the\n object. For more information, see Amazon S3 Bucket Keys in the\n Amazon S3 User Guide.

\n
\n
Access Control List (ACL)-Specific Request Headers
\n
\n

When copying an object, you can optionally use headers to grant ACL-based\n permissions. By default, all objects are private. Only the owner has full access\n control. When adding a new object, you can grant permissions to individual\n Amazon Web Services accounts or to predefined groups that are defined by Amazon S3. These permissions\n are then added to the ACL on the object. For more information, see Access Control\n List (ACL) Overview and Managing ACLs Using the REST\n API.

\n

If the bucket that you're copying objects to uses the bucket owner enforced\n setting for S3 Object Ownership, ACLs are disabled and no longer affect\n permissions. Buckets that use this setting only accept PUT requests\n that don't specify an ACL or PUT requests that specify bucket owner\n full control ACLs, such as the bucket-owner-full-control canned ACL\n or an equivalent form of this ACL expressed in the XML format.

\n

For more information, see Controlling\n ownership of objects and disabling ACLs in the\n Amazon S3 User Guide.

\n \n

If your bucket uses the bucket owner enforced setting for Object Ownership,\n all objects written to the bucket by any account will be owned by the bucket\n owner.

\n
\n
\n
Checksums
\n
\n

When copying an object, if it has a checksum, that checksum will be copied to\n the new object by default. When you copy the object over, you can optionally\n specify a different checksum algorithm to use with the\n x-amz-checksum-algorithm header.

\n
\n
Storage Class Options
\n
\n

You can use the CopyObject action to change the storage class of\n an object that is already stored in Amazon S3 by using the StorageClass\n parameter. For more information, see Storage Classes in\n the Amazon S3 User Guide.

\n

If the source object's storage class is GLACIER or\n DEEP_ARCHIVE, or the object's storage class is\n INTELLIGENT_TIERING and its S3 Intelligent-Tiering access tier is\n Archive Access or Deep Archive Access, you must restore a copy of this object\n before you can use it as a source object for the copy operation. For more\n information, see RestoreObject. For\n more information, see Copying\n Objects.

\n
\n
Versioning
\n
\n

By default, x-amz-copy-source header identifies the current\n version of an object to copy. If the current version is a delete marker, Amazon S3\n behaves as if the object was deleted. To copy a different version, use the\n versionId subresource.

\n

If you enable versioning on the target bucket, Amazon S3 generates a unique version\n ID for the object being copied. This version ID is different from the version ID\n of the source object. Amazon S3 returns the version ID of the copied object in the\n x-amz-version-id response header in the response.

\n

If you do not enable versioning or suspend it on the target bucket, the version\n ID that Amazon S3 generates is always null.

\n
\n
\n

The following operations are related to CopyObject:

\n ", + "smithy.api#documentation": "

Creates a copy of an object that is already stored in Amazon S3.

\n \n

You can store individual objects of up to 5 TB in Amazon S3. You create a copy of your\n object up to 5 GB in size in a single atomic action using this API. However, to copy an\n object greater than 5 GB, you must use the multipart upload Upload Part - Copy\n (UploadPartCopy) API. For more information, see Copy Object Using the\n REST Multipart Upload API.

\n
\n

You can copy individual objects between general purpose buckets, between directory buckets, and \n between general purpose buckets and directory buckets.

\n \n

\n Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name\n . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the\n Amazon S3 User Guide.

\n
\n

Both the\n Region that you want to copy the object from and the Region that you want to copy the\n object to must be enabled for your account.

\n \n

Amazon S3 transfer acceleration does not support cross-Region copies. If you request a\n cross-Region copy using a transfer acceleration endpoint, you get a 400 Bad\n Request error. For more information, see Transfer\n Acceleration.

\n
\n
\n
Authentication and authorization
\n
\n

All CopyObject requests must be authenticated and signed by using IAM credentials (access key ID and secret access key for the IAM identities). All headers with the x-amz- prefix, including\n x-amz-copy-source, must be signed. For more information, see REST Authentication.

\n

\n Directory buckets - You must use the IAM credentials to authenticate and authorize your access to the CopyObject API operation, instead of using the \n temporary security credentials through the CreateSession API operation.

\n

Amazon Web Services CLI or SDKs handles authentication and authorization on your behalf.

\n
\n
Permissions
\n
\n

You must have\n read access to the source object and write\n access to the destination bucket.

\n
    \n
  • \n

    \n General purpose bucket permissions -\n You must have permissions in an IAM policy based on the source and destination\n bucket types in a CopyObject operation.

    \n
      \n
    • \n

      If the source object is in a general purpose bucket, you must have\n \n s3:GetObject\n \n permission to read the source object that is being copied.

      \n
    • \n
    • \n

      If the destination bucket is a general purpose bucket, you must have\n \n s3:PutObject\n \n permission to write the object copy to the destination bucket.

      \n
    • \n
    \n
  • \n
  • \n

    \n Directory bucket permissions -\n You must have permissions in a bucket policy or an IAM identity-based policy based on the source and destination\n bucket types in a CopyObject operation.

    \n
      \n
    • \n

      If the source object that you want to copy is in a\n directory bucket, you must have the \n s3express:CreateSession\n permission in\n the Action element of a policy to read the object. By default, the session is in the ReadWrite mode. If you want to restrict the access, you can explicitly set the s3express:SessionMode condition key to ReadOnly on the copy source bucket.

      \n
    • \n
    • \n

      If the copy destination is a directory bucket, you must have the \n s3express:CreateSession\n permission in the\n Action element of a policy to write the object\n to the destination. The s3express:SessionMode condition\n key can't be set to ReadOnly on the copy destination bucket.

      \n
    • \n
    \n

    For example policies, see Example bucket policies for S3 Express One Zone and Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone in the\n Amazon S3 User Guide.

    \n
  • \n
\n
\n
Response and special errors
\n
\n

When the request is an HTTP 1.1 request, the response is chunk encoded. \n When the request is not an HTTP 1.1 request, the response does not contain the Content-Length header. \n You always need to read the entire response body to check if the copy succeeds; while the data is being copied, Amazon S3 sends white space characters in the response body to keep the connection alive. A minimal error-handling sketch follows the list below.

\n
    \n
  • \n

    If the copy is successful, you receive a response with information about the copied\n object.

    \n
  • \n
  • \n

    A copy request might return an error when Amazon S3 receives the copy request or while Amazon S3\n is copying the files. A 200 OK response can contain either a success or an error.

    \n
      \n
    • \n

      If the error occurs before the copy action starts, you receive a\n standard Amazon S3 error.

      \n
    • \n
    • \n

      If the error occurs during the copy operation, the error response is\n embedded in the 200 OK response. For example, in a cross-Region copy, you \n may encounter throttling and receive a 200 OK response. \n For more information, see Resolve \n the Error 200 response when copying objects to Amazon S3. \n The 200 OK status code means the copy was accepted, but \n it doesn't mean the copy is complete. Another example is that if \n you disconnect from Amazon S3 before the copy is complete, Amazon S3 might cancel the copy and you may receive a 200 OK response. \n You must stay connected to Amazon S3 until the entire response is successfully received and processed.

      \n

      If you call this API operation directly, make\n sure to design your application to parse the content of the response and handle it\n appropriately. If you use Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect the\n embedded error and apply error handling per your configuration settings (including\n automatically retrying the request as appropriate). If the condition persists, the SDKs\n throw an exception (or, for the SDKs that don't use exceptions, they return an \n error).

      \n
    • \n
    \n
  • \n
\n
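A minimal error-handling sketch for the behavior described in the list above, using the AWS SDK for Go v2 and the smithy-go APIError interface (bucket and key names are hypothetical); the SDK surfaces an error embedded in a 200 OK response like any other API error.

    package main

    import (
        "context"
        "errors"
        "fmt"

        "github.com/aws/aws-sdk-go-v2/aws"
        "github.com/aws/aws-sdk-go-v2/config"
        "github.com/aws/aws-sdk-go-v2/service/s3"
        "github.com/aws/smithy-go"
    )

    func main() {
        cfg, err := config.LoadDefaultConfig(context.TODO())
        if err != nil {
            panic(err)
        }
        client := s3.NewFromConfig(cfg)

        _, err = client.CopyObject(context.TODO(), &s3.CopyObjectInput{
            Bucket:     aws.String("amzn-s3-demo-destination-bucket"),      // hypothetical
            Key:        aws.String("reports/january-copy.pdf"),             // hypothetical
            CopySource: aws.String("awsexamplebucket/reports/january.pdf"), // hypothetical
        })
        if err != nil {
            // Errors embedded in a 200 OK response are detected by the SDK and
            // returned here like any other API error.
            var apiErr smithy.APIError
            if errors.As(err, &apiErr) {
                fmt.Println("copy failed:", apiErr.ErrorCode(), apiErr.ErrorMessage())
                return
            }
            panic(err)
        }
        fmt.Println("copy succeeded")
    }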
\n
Charge
\n
\n

The copy request charge is based on the storage class and Region that you specify for\n the destination object. The request can also result in a data retrieval charge for the\n source if the source storage class bills for data retrieval. For pricing information, see\n Amazon S3 pricing.

\n
\n
HTTP Host header syntax
\n
\n

\n Directory buckets - The HTTP Host header syntax is \n Bucket_name.s3express-az_id.region.amazonaws.com.

\n
\n
\n

The following operations are related to CopyObject:

\n ", "smithy.api#examples": [ { "title": "To copy an object", @@ -16781,6 +18540,11 @@ "method": "PUT", "uri": "/{Bucket}/{Key+}?x-id=CopyObject", "code": 200 + }, + "smithy.rules#staticContextParams": { + "DisableS3ExpressSessionAuth": { + "value": true + } } } }, @@ -16797,63 +18561,63 @@ "Expiration": { "target": "com.amazonaws.s3#Expiration", "traits": { - "smithy.api#documentation": "

If the object expiration is configured, the response includes this header.

", + "smithy.api#documentation": "

If the object expiration is configured, the response includes this header.

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-expiration" } }, "CopySourceVersionId": { "target": "com.amazonaws.s3#CopySourceVersionId", "traits": { - "smithy.api#documentation": "

Version of the copied object in the destination bucket.

", + "smithy.api#documentation": "

Version ID of the source object that was copied.

\n \n

This functionality is not supported when the source object is in a directory bucket.

\n
", "smithy.api#httpHeader": "x-amz-copy-source-version-id" } }, "VersionId": { "target": "com.amazonaws.s3#ObjectVersionId", "traits": { - "smithy.api#documentation": "

Version ID of the newly created copy.

", + "smithy.api#documentation": "

Version ID of the newly created copy.

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-version-id" } }, "ServerSideEncryption": { "target": "com.amazonaws.s3#ServerSideEncryption", "traits": { - "smithy.api#documentation": "

The server-side encryption algorithm used when storing this object in Amazon S3 (for example,\n AES256, aws:kms, aws:kms:dsse).

", + "smithy.api#documentation": "

The server-side encryption algorithm used when you store this object in Amazon S3 (for example,\n AES256, aws:kms, aws:kms:dsse).

\n \n

For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) is supported.

\n
", "smithy.api#httpHeader": "x-amz-server-side-encryption" } }, "SSECustomerAlgorithm": { "target": "com.amazonaws.s3#SSECustomerAlgorithm", "traits": { - "smithy.api#documentation": "

If server-side encryption with a customer-provided encryption key was requested, the\n response will include this header confirming the encryption algorithm used.

", + "smithy.api#documentation": "

If server-side encryption with a customer-provided encryption key was requested, the\n response will include this header to confirm the encryption algorithm that's used.

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-server-side-encryption-customer-algorithm" } }, "SSECustomerKeyMD5": { "target": "com.amazonaws.s3#SSECustomerKeyMD5", "traits": { - "smithy.api#documentation": "

If server-side encryption with a customer-provided encryption key was requested, the\n response will include this header to provide round-trip message integrity verification of\n the customer-provided encryption key.

", + "smithy.api#documentation": "

If server-side encryption with a customer-provided encryption key was requested, the\n response will include this header to provide the round-trip message integrity verification of\n the customer-provided encryption key.

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-server-side-encryption-customer-key-MD5" } }, "SSEKMSKeyId": { "target": "com.amazonaws.s3#SSEKMSKeyId", "traits": { - "smithy.api#documentation": "

If present, specifies the ID of the Key Management Service (KMS) symmetric encryption customer managed key\n that was used for the object.

", + "smithy.api#documentation": "

If present, indicates the ID of the Key Management Service (KMS) symmetric encryption customer managed key\n that was used for the object.

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-server-side-encryption-aws-kms-key-id" } }, "SSEKMSEncryptionContext": { "target": "com.amazonaws.s3#SSEKMSEncryptionContext", "traits": { - "smithy.api#documentation": "

If present, specifies the Amazon Web Services KMS Encryption Context to use for object encryption. The\n value of this header is a base64-encoded UTF-8 string holding JSON with the encryption\n context key-value pairs.

", + "smithy.api#documentation": "

If present, indicates the Amazon Web Services KMS Encryption Context to use for object encryption. The\n value of this header is a base64-encoded UTF-8 string holding JSON with the encryption\n context key-value pairs.

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-server-side-encryption-context" } }, "BucketKeyEnabled": { "target": "com.amazonaws.s3#BucketKeyEnabled", "traits": { - "smithy.api#documentation": "

Indicates whether the copied object uses an S3 Bucket Key for server-side encryption\n with Key Management Service (KMS) keys (SSE-KMS).

", + "smithy.api#documentation": "

Indicates whether the copied object uses an S3 Bucket Key for server-side encryption\n with Key Management Service (KMS) keys (SSE-KMS).

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-server-side-encryption-bucket-key-enabled" } }, @@ -16874,14 +18638,14 @@ "ACL": { "target": "com.amazonaws.s3#ObjectCannedACL", "traits": { - "smithy.api#documentation": "

The canned ACL to apply to the object.

\n

This action is not supported by Amazon S3 on Outposts.

", + "smithy.api#documentation": "

The canned access control list (ACL) to apply to the object.

\n

When you copy an object, the ACL metadata is not preserved and is set\n to private by default. Only the owner has full access\n control. To override the default ACL setting,\n specify a new ACL when you generate a copy request. For more information, see Using\n ACLs.

\n

If the destination bucket that you're copying objects to uses the bucket owner enforced\n setting for S3 Object Ownership, ACLs are disabled and no longer affect\n permissions. Buckets that use this setting only accept PUT requests\n that don't specify an ACL or PUT requests that specify bucket owner\n full control ACLs, such as the bucket-owner-full-control canned ACL\n or an equivalent form of this ACL expressed in the XML format. For more information, see Controlling\n ownership of objects and disabling ACLs in the\n Amazon S3 User Guide.

\n \n
    \n
  • \n

    If your destination bucket uses the bucket owner enforced setting for Object Ownership,\n all objects written to the bucket by any account will be owned by the bucket\n owner.

    \n
  • \n
  • \n

    This functionality is not supported for directory buckets.

    \n
  • \n
  • \n

    This functionality is not supported for Amazon S3 on Outposts.

    \n
  • \n
\n
", "smithy.api#httpHeader": "x-amz-acl" } }, "Bucket": { "target": "com.amazonaws.s3#BucketName", "traits": { - "smithy.api#documentation": "

The name of the destination bucket.

\n

When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.

\n

When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form \n AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide.

", + "smithy.api#documentation": "

The name of the destination bucket.

\n

\n Directory buckets - When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format \n Bucket_name.s3express-az_id.region.amazonaws.com. Path-style requests are not supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must follow the format \n bucket_base_name--az-id--x-s3 (for example, \n DOC-EXAMPLE-BUCKET--usw2-az2--x-s3). For information about bucket naming\n restrictions, see Directory bucket naming\n rules in the Amazon S3 User Guide.

\n

\n Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.

\n \n

Access points and Object Lambda access points are not supported by directory buckets.

\n
\n

\n S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form \n AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide.

", "smithy.api#httpLabel": {}, "smithy.api#required": {}, "smithy.rules#contextParam": { @@ -16892,28 +18656,28 @@ "CacheControl": { "target": "com.amazonaws.s3#CacheControl", "traits": { - "smithy.api#documentation": "

Specifies caching behavior along the request/reply chain.

", + "smithy.api#documentation": "

Specifies the caching behavior along the request/reply chain.

", "smithy.api#httpHeader": "Cache-Control" } }, "ChecksumAlgorithm": { "target": "com.amazonaws.s3#ChecksumAlgorithm", "traits": { - "smithy.api#documentation": "

Indicates the algorithm you want Amazon S3 to use to create the checksum for the object. For more information, see\n Checking object integrity in\n the Amazon S3 User Guide.

", + "smithy.api#documentation": "

Indicates the algorithm that you want Amazon S3 to use to create the checksum for the object. For more information, see\n Checking object integrity in\n the Amazon S3 User Guide.

\n

When you copy an object, if the source object has a checksum, that checksum value will be copied to\n the new object by default. If the CopyObject request does not include this x-amz-checksum-algorithm header, the checksum algorithm will be copied from the source object to the destination object (if it's present on the source object). You can optionally\n specify a different checksum algorithm to use with the\n x-amz-checksum-algorithm header. Unrecognized or unsupported values will respond with the HTTP status code 400 Bad Request.

\n \n

For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the default checksum algorithm that's used for performance.
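A brief sketch (hypothetical bucket and key names) of overriding the checksum algorithm on a copy, as described above, by setting the field that maps to the x-amz-checksum-algorithm header:

    package main

    import (
        "context"

        "github.com/aws/aws-sdk-go-v2/aws"
        "github.com/aws/aws-sdk-go-v2/config"
        "github.com/aws/aws-sdk-go-v2/service/s3"
        "github.com/aws/aws-sdk-go-v2/service/s3/types"
    )

    func main() {
        cfg, err := config.LoadDefaultConfig(context.TODO())
        if err != nil {
            panic(err)
        }
        client := s3.NewFromConfig(cfg)

        // Hypothetical names; requesting SHA256 here overrides whatever checksum
        // algorithm the source object carries instead of copying it over.
        _, err = client.CopyObject(context.TODO(), &s3.CopyObjectInput{
            Bucket:            aws.String("amzn-s3-demo-destination-bucket"),
            Key:               aws.String("reports/january-copy.pdf"),
            CopySource:        aws.String("awsexamplebucket/reports/january.pdf"),
            ChecksumAlgorithm: types.ChecksumAlgorithmSha256,
        })
        if err != nil {
            panic(err)
        }
    }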

\n
", "smithy.api#httpHeader": "x-amz-checksum-algorithm" } }, "ContentDisposition": { "target": "com.amazonaws.s3#ContentDisposition", "traits": { - "smithy.api#documentation": "

Specifies presentational information for the object.

", + "smithy.api#documentation": "

Specifies presentational information for the object. Indicates whether an object should be displayed in a web browser or downloaded as a file. It allows specifying the desired filename for the downloaded file.

", "smithy.api#httpHeader": "Content-Disposition" } }, "ContentEncoding": { "target": "com.amazonaws.s3#ContentEncoding", "traits": { - "smithy.api#documentation": "

Specifies what content encodings have been applied to the object and thus what decoding\n mechanisms must be applied to obtain the media-type referenced by the Content-Type header\n field.

", + "smithy.api#documentation": "

Specifies what content encodings have been applied to the object and thus what decoding\n mechanisms must be applied to obtain the media-type referenced by the Content-Type header\n field.

\n \n

For directory buckets, only the aws-chunked value is supported in this header field.

\n
", "smithy.api#httpHeader": "Content-Encoding" } }, @@ -16927,14 +18691,14 @@ "ContentType": { "target": "com.amazonaws.s3#ContentType", "traits": { - "smithy.api#documentation": "

A standard MIME type describing the format of the object data.

", + "smithy.api#documentation": "

A standard MIME type that describes the format of the object data.

", "smithy.api#httpHeader": "Content-Type" } }, "CopySource": { "target": "com.amazonaws.s3#CopySource", "traits": { - "smithy.api#documentation": "

Specifies the source object for the copy operation. You specify the value in one of two\n formats, depending on whether you want to access the source object through an access point:

\n
    \n
  • \n

    For objects not accessed through an access point, specify the name of the source bucket\n and the key of the source object, separated by a slash (/). For example, to copy the\n object reports/january.pdf from the bucket\n awsexamplebucket, use awsexamplebucket/reports/january.pdf.\n The value must be URL-encoded.

    \n
  • \n
  • \n

    For objects accessed through access points, specify the Amazon Resource Name (ARN) of the object as accessed through the access point, in the format arn:aws:s3:::accesspoint//object/. For example, to copy the object reports/january.pdf through access point my-access-point owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf. The value must be URL encoded.

    \n \n

    Amazon S3 supports copy operations using access points only when the source and destination buckets are in the same Amazon Web Services Region.

    \n
    \n

    Alternatively, for objects accessed through Amazon S3 on Outposts, specify the ARN of the object as accessed in the format arn:aws:s3-outposts:::outpost//object/. For example, to copy the object reports/january.pdf through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf. The value must be URL-encoded.

    \n
  • \n
\n

To copy a specific version of an object, append ?versionId=\n to the value (for example,\n awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893).\n If you don't specify a version ID, Amazon S3 copies the latest version of the source\n object.

", + "smithy.api#documentation": "

Specifies the source object for the copy operation. The source object can be up to 5 GB. If the source object is an object that was uploaded by using a multipart upload, the object copy will be a single part object after the source object is copied to the destination bucket.

You specify the value of the copy source in one of two formats, depending on whether you want to access the source object through an access point:

  • For objects not accessed through an access point, specify the name of the source bucket and the key of the source object, separated by a slash (/). For example, to copy the object reports/january.pdf from the general purpose bucket awsexamplebucket, use awsexamplebucket/reports/january.pdf. The value must be URL-encoded. To copy the object reports/january.pdf from the directory bucket awsexamplebucket--use1-az5--x-s3, use awsexamplebucket--use1-az5--x-s3/reports/january.pdf. The value must be URL-encoded.

  • For objects accessed through access points, specify the Amazon Resource Name (ARN) of the object as accessed through the access point, in the format arn:aws:s3:::accesspoint//object/. For example, to copy the object reports/january.pdf through access point my-access-point owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf. The value must be URL-encoded.

      • Amazon S3 supports copy operations using access points only when the source and destination buckets are in the same Amazon Web Services Region.

      • Access points are not supported by directory buckets.

    Alternatively, for objects accessed through Amazon S3 on Outposts, specify the ARN of the object as accessed in the format arn:aws:s3-outposts:::outpost//object/. For example, to copy the object reports/january.pdf through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf. The value must be URL-encoded.

If your source bucket versioning is enabled, the x-amz-copy-source header by default identifies the current version of an object to copy. If the current version is a delete marker, Amazon S3 behaves as if the object was deleted. To copy a different version, use the versionId query parameter. Specifically, append ?versionId= to the value (for example, awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893). If you don't specify a version ID, Amazon S3 copies the latest version of the source object.

If you enable versioning on the destination bucket, Amazon S3 generates a unique version ID for the copied object. This version ID is different from the version ID of the source object. Amazon S3 returns the version ID of the copied object in the x-amz-version-id response header in the response.

If you do not enable versioning or suspend it on the destination bucket, the version ID that Amazon S3 generates in the x-amz-version-id response header is always null.

Directory buckets - S3 Versioning isn't enabled and supported for directory buckets.
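As an illustrative aside (not part of the Smithy model text), a minimal AWS SDK for Go v2 sketch of a CopyObject call that uses the bucket/key form of x-amz-copy-source described above; the bucket and key names are placeholders.

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// CopySource is "source-bucket/source-key"; keys containing special
	// characters must be URL-encoded, and "?versionId=..." may be appended
	// to copy a specific version of the source object.
	out, err := client.CopyObject(ctx, &s3.CopyObjectInput{
		Bucket:     aws.String("destination-bucket"), // placeholder names
		Key:        aws.String("reports/january.pdf"),
		CopySource: aws.String("awsexamplebucket/reports/january.pdf"),
	})
	if err != nil {
		log.Fatal(err)
	}
	// When versioning is enabled on the destination bucket, the new object's
	// version ID is returned in the x-amz-version-id response header.
	log.Println("copied; destination version ID:", aws.ToString(out.VersionId))
}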
", "smithy.api#httpHeader": "x-amz-copy-source", "smithy.api#required": {} } @@ -16942,28 +18706,28 @@ "CopySourceIfMatch": { "target": "com.amazonaws.s3#CopySourceIfMatch", "traits": { - "smithy.api#documentation": "

Copies the object if its entity tag (ETag) matches the specified tag.

", + "smithy.api#documentation": "

Copies the object if its entity tag (ETag) matches the specified tag.

If both the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since headers are present in the request and evaluate as follows, Amazon S3 returns 200 OK and copies the data:

  • x-amz-copy-source-if-match condition evaluates to true

  • x-amz-copy-source-if-unmodified-since condition evaluates to false
", "smithy.api#httpHeader": "x-amz-copy-source-if-match" } }, "CopySourceIfModifiedSince": { "target": "com.amazonaws.s3#CopySourceIfModifiedSince", "traits": { - "smithy.api#documentation": "

Copies the object if it has been modified since the specified time.

", + "smithy.api#documentation": "

Copies the object if it has been modified since the specified time.

If both the x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since headers are present in the request and evaluate as follows, Amazon S3 returns the 412 Precondition Failed response code:

  • x-amz-copy-source-if-none-match condition evaluates to false

  • x-amz-copy-source-if-modified-since condition evaluates to true
", "smithy.api#httpHeader": "x-amz-copy-source-if-modified-since" } }, "CopySourceIfNoneMatch": { "target": "com.amazonaws.s3#CopySourceIfNoneMatch", "traits": { - "smithy.api#documentation": "

Copies the object if its entity tag (ETag) is different than the specified ETag.

", + "smithy.api#documentation": "

Copies the object if its entity tag (ETag) is different than the specified ETag.

If both the x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since headers are present in the request and evaluate as follows, Amazon S3 returns the 412 Precondition Failed response code:

  • x-amz-copy-source-if-none-match condition evaluates to false

  • x-amz-copy-source-if-modified-since condition evaluates to true
", "smithy.api#httpHeader": "x-amz-copy-source-if-none-match" } }, "CopySourceIfUnmodifiedSince": { "target": "com.amazonaws.s3#CopySourceIfUnmodifiedSince", "traits": { - "smithy.api#documentation": "

Copies the object if it hasn't been modified since the specified time.

", + "smithy.api#documentation": "

Copies the object if it hasn't been modified since the specified time.

If both the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since headers are present in the request and evaluate as follows, Amazon S3 returns 200 OK and copies the data:

  • x-amz-copy-source-if-match condition evaluates to true

  • x-amz-copy-source-if-unmodified-since condition evaluates to false
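For illustration only, a hedged AWS SDK for Go v2 sketch of how the four x-amz-copy-source-if-* headers above map onto CopyObjectInput fields; the ETag, timestamp, bucket, key, and the s3copyexamples package name are placeholders, and the client is assumed to be configured as in the earlier sketch.

package s3copyexamples

import (
	"context"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// conditionalCopy copies the source object only if its ETag still matches and
// it has not been modified since the cutoff time; if a precondition fails,
// Amazon S3 returns 412 Precondition Failed and no copy is performed.
func conditionalCopy(ctx context.Context, client *s3.Client) error {
	etag := "\"9b2cf535f27731c974343645a3985328\"" // placeholder ETag
	cutoff := time.Date(2023, time.November, 1, 0, 0, 0, 0, time.UTC)

	_, err := client.CopyObject(ctx, &s3.CopyObjectInput{
		Bucket:                      aws.String("destination-bucket"),
		Key:                         aws.String("reports/january.pdf"),
		CopySource:                  aws.String("awsexamplebucket/reports/january.pdf"),
		CopySourceIfMatch:           aws.String(etag),
		CopySourceIfUnmodifiedSince: aws.Time(cutoff),
	})
	return err
}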
", "smithy.api#httpHeader": "x-amz-copy-source-if-unmodified-since" } }, @@ -16977,28 +18741,28 @@ "GrantFullControl": { "target": "com.amazonaws.s3#GrantFullControl", "traits": { - "smithy.api#documentation": "

Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.

This action is not supported by Amazon S3 on Outposts.

", + "smithy.api#documentation": "

Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.

  • This functionality is not supported for directory buckets.

  • This functionality is not supported for Amazon S3 on Outposts.
", "smithy.api#httpHeader": "x-amz-grant-full-control" } }, "GrantRead": { "target": "com.amazonaws.s3#GrantRead", "traits": { - "smithy.api#documentation": "

Allows grantee to read the object data and its metadata.

This action is not supported by Amazon S3 on Outposts.

", + "smithy.api#documentation": "

Allows grantee to read the object data and its metadata.

  • This functionality is not supported for directory buckets.

  • This functionality is not supported for Amazon S3 on Outposts.
", "smithy.api#httpHeader": "x-amz-grant-read" } }, "GrantReadACP": { "target": "com.amazonaws.s3#GrantReadACP", "traits": { - "smithy.api#documentation": "

Allows grantee to read the object ACL.

This action is not supported by Amazon S3 on Outposts.

", + "smithy.api#documentation": "

Allows grantee to read the object ACL.

  • This functionality is not supported for directory buckets.

  • This functionality is not supported for Amazon S3 on Outposts.
", "smithy.api#httpHeader": "x-amz-grant-read-acp" } }, "GrantWriteACP": { "target": "com.amazonaws.s3#GrantWriteACP", "traits": { - "smithy.api#documentation": "

Allows grantee to write the ACL for the applicable object.

This action is not supported by Amazon S3 on Outposts.

", + "smithy.api#documentation": "

Allows grantee to write the ACL for the applicable object.

  • This functionality is not supported for directory buckets.

  • This functionality is not supported for Amazon S3 on Outposts.
", "smithy.api#httpHeader": "x-amz-grant-write-acp" } }, @@ -17020,98 +18784,98 @@ "MetadataDirective": { "target": "com.amazonaws.s3#MetadataDirective", "traits": { - "smithy.api#documentation": "

Specifies whether the metadata is copied from the source object or replaced with metadata provided in the request.

", + "smithy.api#documentation": "

Specifies whether the metadata is copied from the source object or replaced with metadata that's provided in the request. When copying an object, you can preserve all metadata (the default) or specify new metadata. If this header isn't specified, COPY is the default behavior.

General purpose bucket - For general purpose buckets, when you grant permissions, you can use the s3:x-amz-metadata-directive condition key to enforce certain metadata behavior when objects are uploaded. For more information, see Amazon S3 condition key examples in the Amazon S3 User Guide.

x-amz-website-redirect-location is unique to each object and is not copied when using the x-amz-metadata-directive header. To copy the value, you must specify x-amz-website-redirect-location in the request header.
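As a hedged, illustrative AWS SDK for Go v2 sketch of the behavior described above: the copy replaces the metadata (x-amz-metadata-directive: REPLACE) and re-specifies the website redirect location, which is never copied. Names, metadata keys, and the s3copyexamples package are placeholders.

package s3copyexamples

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// replaceMetadataOnCopy replaces the object metadata instead of copying it
// from the source, and explicitly sets x-amz-website-redirect-location on the
// destination object because that header is not carried over by the copy.
func replaceMetadataOnCopy(ctx context.Context, client *s3.Client) error {
	_, err := client.CopyObject(ctx, &s3.CopyObjectInput{
		Bucket:                  aws.String("destination-bucket"), // placeholders
		Key:                     aws.String("reports/january.pdf"),
		CopySource:              aws.String("awsexamplebucket/reports/january.pdf"),
		MetadataDirective:       types.MetadataDirectiveReplace,
		Metadata:                map[string]string{"project": "monthly-reports"},
		ContentType:             aws.String("application/pdf"),
		WebsiteRedirectLocation: aws.String("/reports/latest.pdf"),
	})
	return err
}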
", "smithy.api#httpHeader": "x-amz-metadata-directive" } }, "TaggingDirective": { "target": "com.amazonaws.s3#TaggingDirective", "traits": { - "smithy.api#documentation": "

Specifies whether the object tag-set are copied from the source object or replaced with tag-set provided in the request.

", + "smithy.api#documentation": "

Specifies whether the object tag-set is copied from the source object or replaced with the tag-set that's provided in the request.

The default value is COPY.

Directory buckets - For directory buckets in a CopyObject operation, only the empty tag-set is supported. Any requests that attempt to write non-empty tags into directory buckets will receive a 501 Not Implemented status code. When the destination bucket is a directory bucket, you will receive a 501 Not Implemented response in any of the following situations:

  • When you attempt to COPY the tag-set from an S3 source object that has non-empty tags.

  • When you attempt to REPLACE the tag-set of a source object and set a non-empty value to x-amz-tagging.

  • When you don't set the x-amz-tagging-directive header and the source object has non-empty tags. This is because the default value of x-amz-tagging-directive is COPY.

Because only the empty tag-set is supported for directory buckets in a CopyObject operation, the following situations are allowed:

  • When you attempt to COPY the tag-set from a directory bucket source object that has no tags to a general purpose bucket. It copies an empty tag-set to the destination object.

  • When you attempt to REPLACE the tag-set of a directory bucket source object and set the x-amz-tagging value of the directory bucket destination object to empty.

  • When you attempt to REPLACE the tag-set of a general purpose bucket source object that has non-empty tags and set the x-amz-tagging value of the directory bucket destination object to empty.

  • When you attempt to REPLACE the tag-set of a directory bucket source object and don't set the x-amz-tagging value of the directory bucket destination object. This is because the default value of x-amz-tagging is the empty value.
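For illustration, a hedged AWS SDK for Go v2 sketch of replacing the destination object's tag-set during a copy to a general purpose bucket; the tags, names, and the s3copyexamples package are placeholders. For a directory bucket destination, only an empty tag-set would be accepted, as described above.

package s3copyexamples

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// replaceTagsOnCopy sets x-amz-tagging-directive to REPLACE and supplies the
// new tag-set as URL-query-encoded parameters in the x-amz-tagging header.
func replaceTagsOnCopy(ctx context.Context, client *s3.Client) error {
	_, err := client.CopyObject(ctx, &s3.CopyObjectInput{
		Bucket:           aws.String("destination-bucket"), // placeholders
		Key:              aws.String("reports/january.pdf"),
		CopySource:       aws.String("awsexamplebucket/reports/january.pdf"),
		TaggingDirective: types.TaggingDirectiveReplace,
		Tagging:          aws.String("department=finance&year=2023"),
	})
	return err
}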
", "smithy.api#httpHeader": "x-amz-tagging-directive" } }, "ServerSideEncryption": { "target": "com.amazonaws.s3#ServerSideEncryption", "traits": { - "smithy.api#documentation": "

The server-side encryption algorithm used when storing this object in Amazon S3 (for example, AES256, aws:kms, aws:kms:dsse).

", + "smithy.api#documentation": "

The server-side encryption algorithm used when storing this object in Amazon S3 (for example, AES256, aws:kms, aws:kms:dsse). Unrecognized or unsupported values won't write a destination object and will receive a 400 Bad Request response.

Amazon S3 automatically encrypts all new objects that are copied to an S3 bucket. When copying an object, if you don't specify encryption information in your copy request, the encryption setting of the target object is set to the default encryption configuration of the destination bucket. By default, all buckets have a base level of encryption configuration that uses server-side encryption with Amazon S3 managed keys (SSE-S3). If the destination bucket has a default encryption configuration that uses server-side encryption with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with customer-provided encryption keys (SSE-C), Amazon S3 uses the corresponding KMS key, or a customer-provided key to encrypt the target object copy.

When you perform a CopyObject operation, if you want to use a different type of encryption setting for the target object, you can specify appropriate encryption-related headers to encrypt the target object with an Amazon S3 managed key, a KMS key, or a customer-provided key. If the encryption setting in your request is different from the default encryption configuration of the destination bucket, the encryption setting in your request takes precedence.

With server-side encryption, Amazon S3 encrypts your data as it writes your data to disks in its data centers and decrypts the data when you access it. For more information about server-side encryption, see Using Server-Side Encryption in the Amazon S3 User Guide.

For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) is supported.
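As a hedged aside, a minimal AWS SDK for Go v2 sketch of overriding the destination bucket's default encryption during a copy by requesting SSE-KMS with a specific key; the key ARN, names, and the s3copyexamples package are placeholders.

package s3copyexamples

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// copyWithSSEKMS asks Amazon S3 to encrypt the object copy with a specific
// KMS key instead of the destination bucket's default encryption setting.
func copyWithSSEKMS(ctx context.Context, client *s3.Client) error {
	_, err := client.CopyObject(ctx, &s3.CopyObjectInput{
		Bucket:               aws.String("destination-bucket"), // placeholders
		Key:                  aws.String("reports/january.pdf"),
		CopySource:           aws.String("awsexamplebucket/reports/january.pdf"),
		ServerSideEncryption: types.ServerSideEncryptionAwsKms,
		SSEKMSKeyId:          aws.String("arn:aws:kms:us-west-2:123456789012:key/EXAMPLE-KEY-ID"),
	})
	return err
}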
", "smithy.api#httpHeader": "x-amz-server-side-encryption" } }, "StorageClass": { "target": "com.amazonaws.s3#StorageClass", "traits": { - "smithy.api#documentation": "

If the x-amz-storage-class header is not used, the copied object will be stored in the STANDARD Storage Class by default. The STANDARD storage class provides high durability and high availability. Depending on performance needs, you can specify a different Storage Class. Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information, see Storage Classes in the Amazon S3 User Guide.

", + "smithy.api#documentation": "

If the x-amz-storage-class header is not used, the copied object will be stored in the STANDARD Storage Class by default. The STANDARD storage class provides high durability and high availability. Depending on performance needs, you can specify a different Storage Class.

  • Directory buckets - For directory buckets, only the S3 Express One Zone storage class is supported to store newly created objects. Unsupported storage class values won't write a destination object and will respond with the HTTP status code 400 Bad Request.

  • Amazon S3 on Outposts - S3 on Outposts only uses the OUTPOSTS Storage Class.

You can use the CopyObject action to change the storage class of an object that is already stored in Amazon S3 by using the x-amz-storage-class header. For more information, see Storage Classes in the Amazon S3 User Guide.

Before using an object as a source object for the copy operation, you must restore a copy of it if it meets any of the following conditions:

  • The storage class of the source object is GLACIER or DEEP_ARCHIVE.

  • The storage class of the source object is INTELLIGENT_TIERING and its S3 Intelligent-Tiering access tier is Archive Access or Deep Archive Access.

For more information, see RestoreObject and Copying Objects in the Amazon S3 User Guide.
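For illustration, a hedged AWS SDK for Go v2 sketch of changing an object's storage class by copying it over itself, as described above; the bucket, key, and the s3copyexamples package are placeholders.

package s3copyexamples

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// changeStorageClass copies an object onto itself to move it to a different
// storage class (here STANDARD_IA) via the x-amz-storage-class header.
// Copying an object over itself is allowed here because the storage class
// of the destination differs from that of the source.
func changeStorageClass(ctx context.Context, client *s3.Client) error {
	_, err := client.CopyObject(ctx, &s3.CopyObjectInput{
		Bucket:       aws.String("awsexamplebucket"), // placeholder bucket
		Key:          aws.String("reports/january.pdf"),
		CopySource:   aws.String("awsexamplebucket/reports/january.pdf"),
		StorageClass: types.StorageClassStandardIa,
	})
	return err
}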

", "smithy.api#httpHeader": "x-amz-storage-class" } }, "WebsiteRedirectLocation": { "target": "com.amazonaws.s3#WebsiteRedirectLocation", "traits": { - "smithy.api#documentation": "

If the bucket is configured as a website, redirects requests for this object to another object in the same bucket or to an external URL. Amazon S3 stores the value of this header in the object metadata. This value is unique to each object and is not copied when using the x-amz-metadata-directive header. Instead, you may opt to provide this header in combination with the directive.

", + "smithy.api#documentation": "

If the destination bucket is configured as a website, redirects requests for this object copy to another object in the same bucket or to an external URL. Amazon S3 stores the value of this header in the object metadata. This value is unique to each object and is not copied when using the x-amz-metadata-directive header. Instead, you may opt to provide this header in combination with the x-amz-metadata-directive header.

This functionality is not supported for directory buckets.
", "smithy.api#httpHeader": "x-amz-website-redirect-location" } }, "SSECustomerAlgorithm": { "target": "com.amazonaws.s3#SSECustomerAlgorithm", "traits": { - "smithy.api#documentation": "

Specifies the algorithm to use to when encrypting the object (for example, AES256).

", + "smithy.api#documentation": "

Specifies the algorithm to use when encrypting the object (for example, AES256).

When you perform a CopyObject operation, if you want to use a different type of encryption setting for the target object, you can specify appropriate encryption-related headers to encrypt the target object with an Amazon S3 managed key, a KMS key, or a customer-provided key. If the encryption setting in your request is different from the default encryption configuration of the destination bucket, the encryption setting in your request takes precedence.

This functionality is not supported when the destination bucket is a directory bucket.
", "smithy.api#httpHeader": "x-amz-server-side-encryption-customer-algorithm" } }, "SSECustomerKey": { "target": "com.amazonaws.s3#SSECustomerKey", "traits": { - "smithy.api#documentation": "

Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon S3 does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm header.

", + "smithy.api#documentation": "

Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded. Amazon S3 does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm header.

This functionality is not supported when the destination bucket is a directory bucket.
", "smithy.api#httpHeader": "x-amz-server-side-encryption-customer-key" } }, "SSECustomerKeyMD5": { "target": "com.amazonaws.s3#SSECustomerKeyMD5", "traits": { - "smithy.api#documentation": "

Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure that the encryption key was transmitted without error.

", + "smithy.api#documentation": "

Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure that the encryption key was transmitted without error.

This functionality is not supported when the destination bucket is a directory bucket.
", "smithy.api#httpHeader": "x-amz-server-side-encryption-customer-key-MD5" } }, "SSEKMSKeyId": { "target": "com.amazonaws.s3#SSEKMSKeyId", "traits": { - "smithy.api#documentation": "

Specifies the KMS ID (Key ID, Key ARN, or Key Alias) to use for object encryption. All GET and PUT requests for an object protected by KMS will fail if they're not made via SSL or using SigV4. For information about configuring any of the officially supported Amazon Web Services SDKs and Amazon Web Services CLI, see Specifying the Signature Version in Request Authentication in the Amazon S3 User Guide.

", + "smithy.api#documentation": "

Specifies the KMS ID (Key ID, Key ARN, or Key Alias) to use for object encryption. All GET and PUT requests for an object protected by KMS will fail if they're not made via SSL or using SigV4. For information about configuring any of the officially supported Amazon Web Services SDKs and Amazon Web Services CLI, see Specifying the Signature Version in Request Authentication in the Amazon S3 User Guide.

This functionality is not supported when the destination bucket is a directory bucket.
", "smithy.api#httpHeader": "x-amz-server-side-encryption-aws-kms-key-id" } }, "SSEKMSEncryptionContext": { "target": "com.amazonaws.s3#SSEKMSEncryptionContext", "traits": { - "smithy.api#documentation": "

Specifies the Amazon Web Services KMS Encryption Context to use for object encryption. The value of this header is a base64-encoded UTF-8 string holding JSON with the encryption context key-value pairs. This value must be explicitly added to specify encryption context for CopyObject requests.

", + "smithy.api#documentation": "

Specifies the Amazon Web Services KMS Encryption Context to use for object encryption. The value of this header is a base64-encoded UTF-8 string holding JSON with the encryption context key-value pairs. This value must be explicitly added to specify encryption context for CopyObject requests.

This functionality is not supported when the destination bucket is a directory bucket.
", "smithy.api#httpHeader": "x-amz-server-side-encryption-context" } }, "BucketKeyEnabled": { "target": "com.amazonaws.s3#BucketKeyEnabled", "traits": { - "smithy.api#documentation": "

Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with server-side encryption using Key Management Service (KMS) keys (SSE-KMS). Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS.

Specifying this header with a COPY action doesn't affect bucket-level settings for S3 Bucket Key.

", + "smithy.api#documentation": "

Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with server-side encryption using Key Management Service (KMS) keys (SSE-KMS). If a target object uses SSE-KMS, you can enable an S3 Bucket Key for the object.

Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS. Specifying this header with a COPY action doesn't affect bucket-level settings for S3 Bucket Key.

For more information, see Amazon S3 Bucket Keys in the Amazon S3 User Guide.

This functionality is not supported when the destination bucket is a directory bucket.
", "smithy.api#httpHeader": "x-amz-server-side-encryption-bucket-key-enabled" } }, "CopySourceSSECustomerAlgorithm": { "target": "com.amazonaws.s3#CopySourceSSECustomerAlgorithm", "traits": { - "smithy.api#documentation": "

Specifies the algorithm to use when decrypting the source object (for example, AES256).

", + "smithy.api#documentation": "

Specifies the algorithm to use when decrypting the source object (for example, AES256).

If the source object for the copy is stored in Amazon S3 using SSE-C, you must provide the necessary encryption information in your request so that Amazon S3 can decrypt the object for copying.

This functionality is not supported when the source object is in a directory bucket.
", "smithy.api#httpHeader": "x-amz-copy-source-server-side-encryption-customer-algorithm" } }, "CopySourceSSECustomerKey": { "target": "com.amazonaws.s3#CopySourceSSECustomerKey", "traits": { - "smithy.api#documentation": "

Specifies the customer-provided encryption key for Amazon S3 to use to decrypt the source object. The encryption key provided in this header must be one that was used when the source object was created.

", + "smithy.api#documentation": "

Specifies the customer-provided encryption key for Amazon S3 to use to decrypt the source object. The encryption key provided in this header must be the same one that was used when the source object was created.

If the source object for the copy is stored in Amazon S3 using SSE-C, you must provide the necessary encryption information in your request so that Amazon S3 can decrypt the object for copying.

This functionality is not supported when the source object is in a directory bucket.
", "smithy.api#httpHeader": "x-amz-copy-source-server-side-encryption-customer-key" } }, "CopySourceSSECustomerKeyMD5": { "target": "com.amazonaws.s3#CopySourceSSECustomerKeyMD5", "traits": { - "smithy.api#documentation": "

Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure that the encryption key was transmitted without error.

", + "smithy.api#documentation": "

Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure that the encryption key was transmitted without error.

If the source object for the copy is stored in Amazon S3 using SSE-C, you must provide the necessary encryption information in your request so that Amazon S3 can decrypt the object for copying.

This functionality is not supported when the source object is in a directory bucket.
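For illustration, a hedged AWS SDK for Go v2 sketch of supplying the three x-amz-copy-source-server-side-encryption-customer-* headers above so that Amazon S3 can decrypt an SSE-C source object for copying; the names and the s3copyexamples package are placeholders, and rawKey is assumed to be the same 256-bit key used when the source object was created.

package s3copyexamples

import (
	"context"
	"crypto/md5"
	"encoding/base64"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// copyFromSSECSource passes the customer-provided key (base64-encoded) and
// the base64-encoded MD5 digest of that key, as the SSE-C copy-source headers
// require.
func copyFromSSECSource(ctx context.Context, client *s3.Client, rawKey []byte) error {
	keyMD5 := md5.Sum(rawKey)

	_, err := client.CopyObject(ctx, &s3.CopyObjectInput{
		Bucket:                         aws.String("destination-bucket"), // placeholders
		Key:                            aws.String("reports/january.pdf"),
		CopySource:                     aws.String("awsexamplebucket/reports/january.pdf"),
		CopySourceSSECustomerAlgorithm: aws.String("AES256"),
		CopySourceSSECustomerKey:       aws.String(base64.StdEncoding.EncodeToString(rawKey)),
		CopySourceSSECustomerKeyMD5:    aws.String(base64.StdEncoding.EncodeToString(keyMD5[:])),
	})
	return err
}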
", "smithy.api#httpHeader": "x-amz-copy-source-server-side-encryption-customer-key-MD5" } }, @@ -17124,42 +18888,42 @@ "Tagging": { "target": "com.amazonaws.s3#TaggingHeader", "traits": { - "smithy.api#documentation": "

The tag-set for the object destination object this value must be used in conjunction with the TaggingDirective. The tag-set must be encoded as URL Query parameters.

", + "smithy.api#documentation": "

The tag-set for the object copy in the destination bucket. This value must be used in conjunction with the x-amz-tagging-directive if you choose REPLACE for the x-amz-tagging-directive. If you choose COPY for the x-amz-tagging-directive, you don't need to set the x-amz-tagging header, because the tag-set will be copied from the source object directly. The tag-set must be encoded as URL Query parameters.

The default value is the empty value.

Directory buckets - For directory buckets in a CopyObject operation, only the empty tag-set is supported. Any requests that attempt to write non-empty tags into directory buckets will receive a 501 Not Implemented status code. When the destination bucket is a directory bucket, you will receive a 501 Not Implemented response in any of the following situations:

  • When you attempt to COPY the tag-set from an S3 source object that has non-empty tags.

  • When you attempt to REPLACE the tag-set of a source object and set a non-empty value to x-amz-tagging.

  • When you don't set the x-amz-tagging-directive header and the source object has non-empty tags. This is because the default value of x-amz-tagging-directive is COPY.

Because only the empty tag-set is supported for directory buckets in a CopyObject operation, the following situations are allowed:

  • When you attempt to COPY the tag-set from a directory bucket source object that has no tags to a general purpose bucket. It copies an empty tag-set to the destination object.

  • When you attempt to REPLACE the tag-set of a directory bucket source object and set the x-amz-tagging value of the directory bucket destination object to empty.

  • When you attempt to REPLACE the tag-set of a general purpose bucket source object that has non-empty tags and set the x-amz-tagging value of the directory bucket destination object to empty.

  • When you attempt to REPLACE the tag-set of a directory bucket source object and don't set the x-amz-tagging value of the directory bucket destination object. This is because the default value of x-amz-tagging is the empty value.
", "smithy.api#httpHeader": "x-amz-tagging" } }, "ObjectLockMode": { "target": "com.amazonaws.s3#ObjectLockMode", "traits": { - "smithy.api#documentation": "

The Object Lock mode that you want to apply to the copied object.

", + "smithy.api#documentation": "

The Object Lock mode that you want to apply to the object copy.

This functionality is not supported for directory buckets.
", "smithy.api#httpHeader": "x-amz-object-lock-mode" } }, "ObjectLockRetainUntilDate": { "target": "com.amazonaws.s3#ObjectLockRetainUntilDate", "traits": { - "smithy.api#documentation": "

The date and time when you want the copied object's Object Lock to expire.

", + "smithy.api#documentation": "

The date and time when you want the Object Lock of the object copy to expire.

This functionality is not supported for directory buckets.
", "smithy.api#httpHeader": "x-amz-object-lock-retain-until-date" } }, "ObjectLockLegalHoldStatus": { "target": "com.amazonaws.s3#ObjectLockLegalHoldStatus", "traits": { - "smithy.api#documentation": "

Specifies whether you want to apply a legal hold to the copied object.

", + "smithy.api#documentation": "

Specifies whether you want to apply a legal hold to the object copy.

This functionality is not supported for directory buckets.
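As a hedged aside, a minimal AWS SDK for Go v2 sketch of applying the three Object Lock settings above to the object copy; the destination bucket is assumed to have Object Lock enabled, and the dates, names, and the s3copyexamples package are placeholders.

package s3copyexamples

import (
	"context"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// copyWithObjectLock applies a retention mode, a retain-until date, and a
// legal hold to the object copy in the destination bucket.
func copyWithObjectLock(ctx context.Context, client *s3.Client) error {
	retainUntil := time.Date(2025, time.January, 1, 0, 0, 0, 0, time.UTC)

	_, err := client.CopyObject(ctx, &s3.CopyObjectInput{
		Bucket:                    aws.String("destination-bucket"), // placeholders
		Key:                       aws.String("reports/january.pdf"),
		CopySource:                aws.String("awsexamplebucket/reports/january.pdf"),
		ObjectLockMode:            types.ObjectLockModeGovernance,
		ObjectLockRetainUntilDate: aws.Time(retainUntil),
		ObjectLockLegalHoldStatus: types.ObjectLockLegalHoldStatusOn,
	})
	return err
}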
", "smithy.api#httpHeader": "x-amz-object-lock-legal-hold" } }, "ExpectedBucketOwner": { "target": "com.amazonaws.s3#AccountId", "traits": { - "smithy.api#documentation": "

The account ID of the expected destination bucket owner. If the destination bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

", + "smithy.api#documentation": "

The account ID of the expected destination bucket owner. If the account ID that you provide does not match the actual owner of the destination bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } }, "ExpectedSourceBucketOwner": { "target": "com.amazonaws.s3#AccountId", "traits": { - "smithy.api#documentation": "

The account ID of the expected source bucket owner. If the source bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

", + "smithy.api#documentation": "

The account ID of the expected source bucket owner. If the account ID that you provide does not match the actual owner of the source bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

", "smithy.api#httpHeader": "x-amz-source-expected-bucket-owner" } } @@ -17186,25 +18950,25 @@ "ChecksumCRC32": { "target": "com.amazonaws.s3#ChecksumCRC32", "traits": { - "smithy.api#documentation": "

The base64-encoded, 32-bit CRC32 checksum of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.

" + "smithy.api#documentation": "

The base64-encoded, 32-bit CRC32 checksum of the object. This will only be present if it was uploaded with the object. For more information, see Checking object integrity in the Amazon S3 User Guide.

" } }, "ChecksumCRC32C": { "target": "com.amazonaws.s3#ChecksumCRC32C", "traits": { - "smithy.api#documentation": "

The base64-encoded, 32-bit CRC32C checksum of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.

" + "smithy.api#documentation": "

The base64-encoded, 32-bit CRC32C checksum of the object. This will only be present if it was uploaded with the object. For more information, see Checking object integrity in the Amazon S3 User Guide.

" } }, "ChecksumSHA1": { "target": "com.amazonaws.s3#ChecksumSHA1", "traits": { - "smithy.api#documentation": "

The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.

" + "smithy.api#documentation": "

The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded with the object. For more information, see Checking object integrity in the Amazon S3 User Guide.

" } }, "ChecksumSHA256": { "target": "com.amazonaws.s3#ChecksumSHA256", "traits": { - "smithy.api#documentation": "

The base64-encoded, 256-bit SHA-256 digest of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.

" + "smithy.api#documentation": "

The base64-encoded, 256-bit SHA-256 digest of the object. This will only be present if it was uploaded with the object. For more information, see Checking object integrity in the Amazon S3 User Guide.

" } } }, @@ -17230,25 +18994,25 @@ "ChecksumCRC32": { "target": "com.amazonaws.s3#ChecksumCRC32", "traits": { - "smithy.api#documentation": "

The base64-encoded, 32-bit CRC32 checksum of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.

" + "smithy.api#documentation": "

The base64-encoded, 32-bit CRC32 checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.

" } }, "ChecksumCRC32C": { "target": "com.amazonaws.s3#ChecksumCRC32C", "traits": { - "smithy.api#documentation": "

The base64-encoded, 32-bit CRC32C checksum of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.

" + "smithy.api#documentation": "

The base64-encoded, 32-bit CRC32C checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.

" } }, "ChecksumSHA1": { "target": "com.amazonaws.s3#ChecksumSHA1", "traits": { - "smithy.api#documentation": "

The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.

" + "smithy.api#documentation": "

The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded with the object. When you use the API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.

" } }, "ChecksumSHA256": { "target": "com.amazonaws.s3#ChecksumSHA256", "traits": { - "smithy.api#documentation": "

The base64-encoded, 256-bit SHA-256 digest of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.

" + "smithy.api#documentation": "

The base64-encoded, 256-bit SHA-256 digest of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
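For illustration, a hedged AWS SDK for Go v2 sketch of reading one of the checksum fields above from the result of a copy; the names and the s3copyexamples package are placeholders, and the field is assumed to be nil when the source object was not uploaded with that checksum algorithm.

package s3copyexamples

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// printCopyChecksum copies an object and logs the SHA-256 checksum that
// Amazon S3 reports for the copy in the CopyObjectResult shape, if present.
func printCopyChecksum(ctx context.Context, client *s3.Client) error {
	out, err := client.CopyObject(ctx, &s3.CopyObjectInput{
		Bucket:     aws.String("destination-bucket"), // placeholders
		Key:        aws.String("reports/january.pdf"),
		CopySource: aws.String("awsexamplebucket/reports/january.pdf"),
	})
	if err != nil {
		return err
	}
	if r := out.CopyObjectResult; r != nil && r.ChecksumSHA256 != nil {
		log.Println("SHA-256 checksum of the copy:", aws.ToString(r.ChecksumSHA256))
	}
	return nil
}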

" } } }, @@ -17309,7 +19073,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a new S3 bucket. To create a bucket, you must register with Amazon S3 and have a valid Amazon Web Services Access Key ID to authenticate requests. Anonymous requests are never allowed to create buckets. By creating the bucket, you become the bucket owner.

Not every string is an acceptable bucket name. For information about bucket naming restrictions, see Bucket naming rules.

If you want to create an Amazon S3 on Outposts bucket, see Create Bucket.

By default, the bucket is created in the US East (N. Virginia) Region. You can optionally specify a Region in the request body. To constrain the bucket creation to a specific Region, you can use the LocationConstraint condition key. You might choose a Region to optimize latency, minimize costs, or address regulatory requirements. For example, if you reside in Europe, you will probably find it advantageous to create buckets in the Europe (Ireland) Region. For more information, see Accessing a bucket.

If you send your create bucket request to the s3.amazonaws.com endpoint, the request goes to the us-east-1 Region. Accordingly, the signature calculations in Signature Version 4 must use us-east-1 as the Region, even if the location constraint in the request specifies another Region where the bucket is to be created. If you create a bucket in a Region other than US East (N. Virginia), your application must be able to handle 307 redirect. For more information, see Virtual hosting of buckets.

Permissions

In addition to s3:CreateBucket, the following permissions are required when your CreateBucket request includes specific headers:

  • Access control lists (ACLs) - If your CreateBucket request specifies access control list (ACL) permissions and the ACL is public-read, public-read-write, authenticated-read, or if you specify access permissions explicitly through any other ACL, both s3:CreateBucket and s3:PutBucketAcl permissions are needed. If the ACL for the CreateBucket request is private or if the request doesn't specify any ACLs, only s3:CreateBucket permission is needed.

  • Object Lock - If ObjectLockEnabledForBucket is set to true in your CreateBucket request, s3:PutBucketObjectLockConfiguration and s3:PutBucketVersioning permissions are required.

  • S3 Object Ownership - If your CreateBucket request includes the x-amz-object-ownership header, then the s3:PutBucketOwnershipControls permission is required. By default, ObjectOwnership is set to BucketOwnerEnforced and ACLs are disabled. We recommend keeping ACLs disabled, except in uncommon use cases where you must control access for each object individually. If you want to change the ObjectOwnership setting, you can use the x-amz-object-ownership header in your CreateBucket request to set the ObjectOwnership setting of your choice. For more information about S3 Object Ownership, see Controlling object ownership in the Amazon S3 User Guide.

  • S3 Block Public Access - If your specific use case requires granting public access to your S3 resources, you can disable Block Public Access. You can create a new bucket with Block Public Access enabled, then separately call the DeletePublicAccessBlock API. To use this operation, you must have the s3:PutBucketPublicAccessBlock permission. By default, all Block Public Access settings are enabled for new buckets. To avoid inadvertent exposure of your resources, we recommend keeping the S3 Block Public Access settings enabled. For more information about S3 Block Public Access, see Blocking public access to your Amazon S3 storage in the Amazon S3 User Guide.

If your CreateBucket request sets BucketOwnerEnforced for Amazon S3 Object Ownership and specifies a bucket ACL that provides access to an external Amazon Web Services account, your request fails with a 400 error and returns the InvalidBucketAcLWithObjectOwnership error code. For more information, see Setting Object Ownership on an existing bucket in the Amazon S3 User Guide.

The following operations are related to CreateBucket:

\n ", + "smithy.api#documentation": "\n

This action creates an Amazon S3 bucket. To create an Amazon S3 on Outposts bucket, see CreateBucket.

Creates a new S3 bucket. To create a bucket, you must set up Amazon S3 and have a valid Amazon Web Services Access Key ID to authenticate requests. Anonymous requests are never allowed to create buckets. By creating the bucket, you become the bucket owner.

There are two types of buckets: general purpose buckets and directory buckets. For more information about these bucket types, see Creating, configuring, and working with Amazon S3 buckets in the Amazon S3 User Guide.

  • General purpose buckets - If you send your CreateBucket request to the s3.amazonaws.com global endpoint, the request goes to the us-east-1 Region. So the signature calculations in Signature Version 4 must use us-east-1 as the Region, even if the location constraint in the request specifies another Region where the bucket is to be created. If you create a bucket in a Region other than US East (N. Virginia), your application must be able to handle 307 redirect. For more information, see Virtual hosting of buckets in the Amazon S3 User Guide.

  • Directory buckets - For directory buckets, you must make requests for this API operation to the Regional endpoint. These endpoints support path-style requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name. Virtual-hosted-style requests aren't supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.

Permissions

  • General purpose bucket permissions - In addition to the s3:CreateBucket permission, the following permissions are required in a policy when your CreateBucket request includes specific headers:

      • Access control lists (ACLs) - In your CreateBucket request, if you specify an access control list (ACL) and set it to public-read, public-read-write, authenticated-read, or if you explicitly specify any other custom ACLs, both s3:CreateBucket and s3:PutBucketAcl permissions are required. In your CreateBucket request, if you set the ACL to private, or if you don't specify any ACLs, only the s3:CreateBucket permission is required.

      • Object Lock - In your CreateBucket request, if you set x-amz-bucket-object-lock-enabled to true, the s3:PutBucketObjectLockConfiguration and s3:PutBucketVersioning permissions are required.

      • S3 Object Ownership - If your CreateBucket request includes the x-amz-object-ownership header, then the s3:PutBucketOwnershipControls permission is required.

        If your CreateBucket request sets BucketOwnerEnforced for Amazon S3 Object Ownership and specifies a bucket ACL that provides access to an external Amazon Web Services account, your request fails with a 400 error and returns the InvalidBucketAcLWithObjectOwnership error code. For more information, see Setting Object Ownership on an existing bucket in the Amazon S3 User Guide.

      • S3 Block Public Access - If your specific use case requires granting public access to your S3 resources, you can disable Block Public Access. Specifically, you can create a new bucket with Block Public Access enabled, then separately call the DeletePublicAccessBlock API. To use this operation, you must have the s3:PutBucketPublicAccessBlock permission. For more information about S3 Block Public Access, see Blocking public access to your Amazon S3 storage in the Amazon S3 User Guide.

  • Directory bucket permissions - You must have the s3express:CreateBucket permission in an IAM identity-based policy instead of a bucket policy. Cross-account access to this API operation isn't supported. This operation can only be performed by the Amazon Web Services account that owns the resource. For more information about directory bucket policies and permissions, see Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone in the Amazon S3 User Guide.

    The permissions for ACLs, Object Lock, S3 Object Ownership, and S3 Block Public Access are not supported for directory buckets. For directory buckets, all Block Public Access settings are enabled at the bucket level and S3 Object Ownership is set to Bucket owner enforced (ACLs disabled). These settings can't be modified.

    For more information about permissions for creating and working with directory buckets, see Directory buckets in the Amazon S3 User Guide. For more information about supported S3 features for directory buckets, see Features of S3 Express One Zone in the Amazon S3 User Guide.

HTTP Host header syntax

Directory buckets - The HTTP Host header syntax is s3express-control.region.amazonaws.com.

The following operations are related to CreateBucket:

\n ", "smithy.api#examples": [ { "title": "To create a bucket in a specific region", @@ -17331,6 +19095,9 @@ "code": 200 }, "smithy.rules#staticContextParams": { + "UseS3ExpressControlEndpoint": { + "value": true + }, "DisableAccessPoints": { "value": true } @@ -17343,7 +19110,19 @@ "LocationConstraint": { "target": "com.amazonaws.s3#BucketLocationConstraint", "traits": { - "smithy.api#documentation": "

Specifies the Region where the bucket will be created. If you don't specify a Region, the bucket is created in the US East (N. Virginia) Region (us-east-1).

" + "smithy.api#documentation": "

Specifies the Region where the bucket will be created. You might choose a Region to optimize latency, minimize costs, or address regulatory requirements. For example, if you reside in Europe, you will probably find it advantageous to create buckets in the Europe (Ireland) Region. For more information, see Accessing a bucket in the Amazon S3 User Guide.

If you don't specify a Region, the bucket is created in the US East (N. Virginia) Region (us-east-1) by default.

This functionality is not supported for directory buckets.
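For illustration, a hedged AWS SDK for Go v2 sketch of creating a general purpose bucket outside us-east-1 by sending the LocationConstraint described above in the request body; the bucket name, Region, and the s3copyexamples package are placeholders.

package s3copyexamples

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// createBucketInRegion creates a bucket in eu-west-1 by supplying a
// CreateBucketConfiguration with a LocationConstraint; omitting it would
// create the bucket in us-east-1 by default.
func createBucketInRegion(ctx context.Context, client *s3.Client) error {
	_, err := client.CreateBucket(ctx, &s3.CreateBucketInput{
		Bucket: aws.String("awsexamplebucket"), // placeholder bucket name
		CreateBucketConfiguration: &types.CreateBucketConfiguration{
			LocationConstraint: types.BucketLocationConstraint("eu-west-1"),
		},
	})
	return err
}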
" + } + }, + "Location": { + "target": "com.amazonaws.s3#LocationInfo", + "traits": { + "smithy.api#documentation": "

Specifies the location where the bucket will be created.

For directory buckets, the location type is Availability Zone.

This functionality is only supported by directory buckets.
" + } + }, + "Bucket": { + "target": "com.amazonaws.s3#BucketInfo", + "traits": { + "smithy.api#documentation": "

Specifies the information about the bucket that will be created.

This functionality is only supported by directory buckets.
" } } }, @@ -17372,14 +19151,14 @@ "ACL": { "target": "com.amazonaws.s3#BucketCannedACL", "traits": { - "smithy.api#documentation": "

The canned ACL to apply to the bucket.

", + "smithy.api#documentation": "

The canned ACL to apply to the bucket.

This functionality is not supported for directory buckets.
", "smithy.api#httpHeader": "x-amz-acl" } }, "Bucket": { "target": "com.amazonaws.s3#BucketName", "traits": { - "smithy.api#documentation": "

The name of the bucket to create.

", + "smithy.api#documentation": "

The name of the bucket to create.

General purpose buckets - For information about bucket naming restrictions, see Bucket naming rules in the Amazon S3 User Guide.

Directory buckets - When you use this operation with a directory bucket, you must use path-style requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name. Virtual-hosted-style requests aren't supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must also follow the format bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide.

", "smithy.api#httpLabel": {}, "smithy.api#required": {}, "smithy.rules#contextParam": { @@ -17398,42 +19177,42 @@ "GrantFullControl": { "target": "com.amazonaws.s3#GrantFullControl", "traits": { - "smithy.api#documentation": "

Allows grantee the read, write, read ACP, and write ACP permissions on the bucket.

", + "smithy.api#documentation": "

Allows grantee the read, write, read ACP, and write ACP permissions on the bucket.

This functionality is not supported for directory buckets.
", "smithy.api#httpHeader": "x-amz-grant-full-control" } }, "GrantRead": { "target": "com.amazonaws.s3#GrantRead", "traits": { - "smithy.api#documentation": "

Allows grantee to list the objects in the bucket.

", + "smithy.api#documentation": "

Allows grantee to list the objects in the bucket.

This functionality is not supported for directory buckets.
", "smithy.api#httpHeader": "x-amz-grant-read" } }, "GrantReadACP": { "target": "com.amazonaws.s3#GrantReadACP", "traits": { - "smithy.api#documentation": "

Allows grantee to read the bucket ACL.

", + "smithy.api#documentation": "

Allows grantee to read the bucket ACL.

This functionality is not supported for directory buckets.
", "smithy.api#httpHeader": "x-amz-grant-read-acp" } }, "GrantWrite": { "target": "com.amazonaws.s3#GrantWrite", "traits": { - "smithy.api#documentation": "

Allows grantee to create new objects in the bucket.

For the bucket and object owners of existing objects, also allows deletions and overwrites of those objects.

", + "smithy.api#documentation": "

Allows grantee to create new objects in the bucket.

For the bucket and object owners of existing objects, also allows deletions and overwrites of those objects.

This functionality is not supported for directory buckets.
", "smithy.api#httpHeader": "x-amz-grant-write" } }, "GrantWriteACP": { "target": "com.amazonaws.s3#GrantWriteACP", "traits": { - "smithy.api#documentation": "

Allows grantee to write the ACL for the applicable bucket.

", + "smithy.api#documentation": "

Allows grantee to write the ACL for the applicable bucket.

This functionality is not supported for directory buckets.
", "smithy.api#httpHeader": "x-amz-grant-write-acp" } }, "ObjectLockEnabledForBucket": { "target": "com.amazonaws.s3#ObjectLockEnabledForBucket", "traits": { - "smithy.api#documentation": "

Specifies whether you want S3 Object Lock to be enabled for the new bucket.

", + "smithy.api#documentation": "

Specifies whether you want S3 Object Lock to be enabled for the new bucket.

This functionality is not supported for directory buckets.
", "smithy.api#httpHeader": "x-amz-bucket-object-lock-enabled" } }, @@ -17457,7 +19236,7 @@ "target": "com.amazonaws.s3#CreateMultipartUploadOutput" }, "traits": { - "smithy.api#documentation": "

This action initiates a multipart upload and returns an upload ID. This upload ID is\n used to associate all of the parts in the specific multipart upload. You specify this\n upload ID in each of your subsequent upload part requests (see UploadPart). You also include this\n upload ID in the final request to either complete or abort the multipart upload\n request.

\n

For more information about multipart uploads, see Multipart Upload Overview.

\n

If you have configured a lifecycle rule to abort incomplete multipart uploads, the\n upload must complete within the number of days specified in the bucket lifecycle\n configuration. Otherwise, the incomplete multipart upload becomes eligible for an abort\n action and Amazon S3 aborts the multipart upload. For more information, see Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle\n Configuration.

\n

For information about the permissions required to use the multipart upload API, see\n Multipart\n Upload and Permissions.

\n

For request signing, multipart upload is just a series of regular requests. You initiate\n a multipart upload, send one or more requests to upload parts, and then complete the\n multipart upload process. You sign each request individually. There is nothing special\n about signing multipart upload requests. For more information about signing, see Authenticating Requests (Amazon Web Services Signature Version 4).

\n \n

After you initiate a multipart upload and upload one or more parts, to stop being\n charged for storing the uploaded parts, you must either complete or abort the multipart\n upload. Amazon S3 frees up the space used to store the parts and stop charging you for\n storing them only after you either complete or abort a multipart upload.

\n
\n

Server-side encryption is for data encryption at rest. Amazon S3 encrypts your data as it\n writes it to disks in its data centers and decrypts it when you access it. Amazon S3\n automatically encrypts all new objects that are uploaded to an S3 bucket. When doing a\n multipart upload, if you don't specify encryption information in your request, the\n encryption setting of the uploaded parts is set to the default encryption configuration of\n the destination bucket. By default, all buckets have a base level of encryption\n configuration that uses server-side encryption with Amazon S3 managed keys (SSE-S3). If the\n destination bucket has a default encryption configuration that uses server-side encryption\n with an Key Management Service (KMS) key (SSE-KMS), or a customer-provided encryption key (SSE-C),\n Amazon S3 uses the corresponding KMS key, or a customer-provided key to encrypt the uploaded\n parts. When you perform a CreateMultipartUpload operation, if you want to use a different\n type of encryption setting for the uploaded parts, you can request that Amazon S3 encrypts the\n object with a KMS key, an Amazon S3 managed key, or a customer-provided key. If the encryption\n setting in your request is different from the default encryption configuration of the\n destination bucket, the encryption setting in your request takes precedence. If you choose\n to provide your own encryption key, the request headers you provide in UploadPart\n and UploadPartCopy requests must match the headers you used in the request to\n initiate the upload by using CreateMultipartUpload. You can request that Amazon S3\n save the uploaded parts encrypted with server-side encryption with an Amazon S3 managed key\n (SSE-S3), an Key Management Service (KMS) key (SSE-KMS), or a customer-provided encryption key\n (SSE-C).

\n

To perform a multipart upload with encryption by using an Amazon Web Services KMS key, the requester\n must have permission to the kms:Decrypt and kms:GenerateDataKey*\n actions on the key. These permissions are required because Amazon S3 must decrypt and read data\n from the encrypted file parts before it completes the multipart upload. For more\n information, see Multipart upload API\n and permissions and Protecting data using\n server-side encryption with Amazon Web Services KMS in the\n Amazon S3 User Guide.

\n

If your Identity and Access Management (IAM) user or role is in the same Amazon Web Services account as the KMS key,\n then you must have these permissions on the key policy. If your IAM user or role belongs\n to a different account than the key, then you must have the permissions on both the key\n policy and your IAM user or role.

\n

For more information, see Protecting Data Using Server-Side\n Encryption.

\n
\n
Access Permissions
\n
\n

When copying an object, you can optionally specify the accounts or groups that\n should be granted specific permissions on the new object. There are two ways to\n grant the permissions using the request headers:

\n
    \n
  • \n

    Specify a canned ACL with the x-amz-acl request header. For\n more information, see Canned\n ACL.

    \n
  • \n
  • \n

    Specify access permissions explicitly with the\n x-amz-grant-read, x-amz-grant-read-acp,\n x-amz-grant-write-acp, and\n x-amz-grant-full-control headers. These parameters map to\n the set of permissions that Amazon S3 supports in an ACL. For more information,\n see Access Control List (ACL) Overview.

    \n
  • \n
\n

You can use either a canned ACL or specify access permissions explicitly. You\n cannot do both.

\n
\n
Server-Side-Encryption-Specific Request Headers
\n
\n

Amazon S3 encrypts data by using server-side encryption with an Amazon S3 managed key\n (SSE-S3) by default. Server-side encryption is for data encryption at rest. Amazon S3\n encrypts your data as it writes it to disks in its data centers and decrypts it\n when you access it. You can request that Amazon S3 encrypts data at rest by using\n server-side encryption with other key options. The option you use depends on\n whether you want to use KMS keys (SSE-KMS) or provide your own encryption keys\n (SSE-C).

\n
    \n
  • \n

    Use KMS keys (SSE-KMS) that include the Amazon Web Services managed key\n (aws/s3) and KMS customer managed keys stored in Key Management Service (KMS) –\n If you want Amazon Web Services to manage the keys used to encrypt data, specify the\n following headers in the request.

    \n
      \n
    • \n

      \n x-amz-server-side-encryption\n

      \n
    • \n
    • \n

      \n x-amz-server-side-encryption-aws-kms-key-id\n

      \n
    • \n
    • \n

      \n x-amz-server-side-encryption-context\n

      \n
    • \n
    \n \n

    If you specify x-amz-server-side-encryption:aws:kms, but\n don't provide x-amz-server-side-encryption-aws-kms-key-id,\n Amazon S3 uses the Amazon Web Services managed key (aws/s3 key) in KMS to\n protect the data.

    \n
    \n \n

    All GET and PUT requests for an object\n protected by KMS fail if you don't make them by using Secure Sockets\n Layer (SSL), Transport Layer Security (TLS), or Signature Version\n 4.

    \n
    \n

    For more information about server-side encryption with KMS keys\n (SSE-KMS), see Protecting Data\n Using Server-Side Encryption with KMS keys.

    \n
  • \n
  • \n

    Use customer-provided encryption keys (SSE-C) – If you want to manage\n your own encryption keys, provide all the following headers in the\n request.

    \n
      \n
    • \n

      \n x-amz-server-side-encryption-customer-algorithm\n

      \n
    • \n
    • \n

      \n x-amz-server-side-encryption-customer-key\n

      \n
    • \n
    • \n

      \n x-amz-server-side-encryption-customer-key-MD5\n

      \n
    • \n
    \n

    For more information about server-side encryption with customer-provided\n encryption keys (SSE-C), see \n Protecting data using server-side encryption with customer-provided\n encryption keys (SSE-C).

    \n
  • \n
\n
\n
Access-Control-List (ACL)-Specific Request Headers
\n
\n

You also can use the following access control–related headers with this\n operation. By default, all objects are private. Only the owner has full access\n control. When adding a new object, you can grant permissions to individual\n Amazon Web Services accounts or to predefined groups defined by Amazon S3. These permissions are then\n added to the access control list (ACL) on the object. For more information, see\n Using ACLs. With this operation, you can grant access permissions\n using one of the following two methods:

\n
    \n
  • \n

    Specify a canned ACL (x-amz-acl) — Amazon S3 supports a set of\n predefined ACLs, known as canned ACLs. Each canned ACL\n has a predefined set of grantees and permissions. For more information, see\n Canned\n ACL.

    \n
  • \n
  • \n

    Specify access permissions explicitly — To explicitly grant access\n permissions to specific Amazon Web Services accounts or groups, use the following headers.\n Each header maps to specific permissions that Amazon S3 supports in an ACL. For\n more information, see Access Control List (ACL)\n Overview. In the header, you specify a list of grantees who get\n the specific permission. To grant permissions explicitly, use:

    \n
      \n
    • \n

      \n x-amz-grant-read\n

      \n
    • \n
    • \n

      \n x-amz-grant-write\n

      \n
    • \n
    • \n

      \n x-amz-grant-read-acp\n

      \n
    • \n
    • \n

      \n x-amz-grant-write-acp\n

      \n
    • \n
    • \n

      \n x-amz-grant-full-control\n

      \n
    • \n
    \n

    You specify each grantee as a type=value pair, where the type is one of\n the following:

    \n
      \n
    • \n

      \n id – if the value specified is the canonical user ID\n of an Amazon Web Services account

      \n
    • \n
    • \n

      \n uri – if you are granting permissions to a predefined\n group

      \n
    • \n
    • \n

      \n emailAddress – if the value specified is the email\n address of an Amazon Web Services account

      \n \n

      Using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:

      \n
        \n
      • \n

        US East (N. Virginia)

        \n
      • \n
      • \n

        US West (N. California)

        \n
      • \n
      • \n

        US West (Oregon)

        \n
      • \n
      • \n

        Asia Pacific (Singapore)

        \n
      • \n
      • \n

        Asia Pacific (Sydney)

        \n
      • \n
      • \n

        Asia Pacific (Tokyo)

        \n
      • \n
      • \n

        Europe (Ireland)

        \n
      • \n
      • \n

        South America (São Paulo)

        \n
      • \n
      \n

      For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.

      \n
      \n
    • \n
    \n

    For example, the following x-amz-grant-read header grants the Amazon Web Services accounts identified by account IDs permissions to read object data and its metadata:

    \n

    \n x-amz-grant-read: id=\"11112222333\", id=\"444455556666\" \n

    \n
  • \n
\n
\n
\n

The following operations are related to CreateMultipartUpload:

\n ", + "smithy.api#documentation": "

This action initiates a multipart upload and returns an upload ID. This upload ID is\n used to associate all of the parts in the specific multipart upload. You specify this\n upload ID in each of your subsequent upload part requests (see UploadPart). You also include this\n upload ID in the final request to either complete or abort the multipart upload\n request. For more information about multipart uploads, see Multipart Upload Overview in the Amazon S3 User Guide.
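As an illustration only (not part of the service documentation), the following minimal AWS SDK for Go v2 sketch shows how the upload ID returned by CreateMultipartUpload ties the later calls together. The bucket and object names are placeholders, and the same UploadId value would also be passed to UploadPart and CompleteMultipartUpload.

package main

import (
    "context"
    "log"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/config"
    "github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
    ctx := context.TODO()
    cfg, err := config.LoadDefaultConfig(ctx)
    if err != nil {
        log.Fatal(err)
    }
    client := s3.NewFromConfig(cfg)

    // Initiate the multipart upload and keep the returned upload ID.
    create, err := client.CreateMultipartUpload(ctx, &s3.CreateMultipartUploadInput{
        Bucket: aws.String("amzn-s3-demo-bucket"), // placeholder bucket name
        Key:    aws.String("example-object"),
    })
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("upload ID: %s", aws.ToString(create.UploadId))

    // Every subsequent UploadPart and CompleteMultipartUpload request must carry
    // this same UploadId. Here the upload is simply aborted so the example does
    // not leave billable parts behind.
    _, err = client.AbortMultipartUpload(ctx, &s3.AbortMultipartUploadInput{
        Bucket:   aws.String("amzn-s3-demo-bucket"),
        Key:      aws.String("example-object"),
        UploadId: create.UploadId,
    })
    if err != nil {
        log.Fatal(err)
    }
}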

\n \n

After you initiate a multipart upload and upload one or more parts, to stop being\n charged for storing the uploaded parts, you must either complete or abort the multipart\n upload. Amazon S3 frees up the space used to store the parts and stops charging you for\n storing them only after you either complete or abort a multipart upload.

\n
\n

If you have configured a lifecycle rule to abort incomplete multipart uploads, the created multipart \n upload must be completed within the number of days specified in the bucket lifecycle\n configuration. Otherwise, the incomplete multipart upload becomes eligible for an abort\n action and Amazon S3 aborts the multipart upload. For more information, see Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle\n Configuration.

\n \n
    \n
  • \n

    \n Directory buckets - S3 Lifecycle is not supported by directory buckets.

    \n
  • \n
  • \n

    \n Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name\n . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the\n Amazon S3 User Guide.

    \n
  • \n
\n
\n
\n
Request signing
\n
\n

For request signing, multipart upload is just a series of regular requests. You initiate\n a multipart upload, send one or more requests to upload parts, and then complete the\n multipart upload process. You sign each request individually. There is nothing special\n about signing multipart upload requests. For more information about signing, see Authenticating Requests (Amazon Web Services Signature Version 4) in the Amazon S3 User Guide.

\n
\n
Permissions
\n
\n
    \n
  • \n

    \n General purpose bucket permissions - For information about the permissions required to use the multipart upload API, see\n Multipart\n upload and permissions in the Amazon S3 User Guide.

    \n

    To perform a multipart upload with encryption by using an Amazon Web Services KMS key, the requester\n must have permission to the kms:Decrypt and kms:GenerateDataKey*\n actions on the key. These permissions are required because Amazon S3 must decrypt and read data\n from the encrypted file parts before it completes the multipart upload. For more\n information, see Multipart upload API\n and permissions and Protecting data using\n server-side encryption with Amazon Web Services KMS in the\n Amazon S3 User Guide.

    \n
  • \n
  • \n

    \n Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the \n CreateSession\n API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. \nThe Amazon Web Services CLI and SDKs create the session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see \n CreateSession\n .

    \n
  • \n
\n
\n
Encryption
\n
\n
    \n
  • \n

    \n General purpose buckets - Server-side encryption is for data encryption at rest. Amazon S3 encrypts your data as it\n writes it to disks in its data centers and decrypts it when you access it. Amazon S3\n automatically encrypts all new objects that are uploaded to an S3 bucket. When doing a\n multipart upload, if you don't specify encryption information in your request, the\n encryption setting of the uploaded parts is set to the default encryption configuration of\n the destination bucket. By default, all buckets have a base level of encryption\n configuration that uses server-side encryption with Amazon S3 managed keys (SSE-S3). If the\n destination bucket has a default encryption configuration that uses server-side encryption\n with a Key Management Service (KMS) key (SSE-KMS), or a customer-provided encryption key (SSE-C),\n Amazon S3 uses the corresponding KMS key, or a customer-provided key to encrypt the uploaded\n parts. When you perform a CreateMultipartUpload operation, if you want to use a different\n type of encryption setting for the uploaded parts, you can request that Amazon S3 encrypts the\n object with a different encryption key (such as an Amazon S3 managed key, a KMS key, or a customer-provided key). When the encryption\n setting in your request is different from the default encryption configuration of the\n destination bucket, the encryption setting in your request takes precedence. If you choose\n to provide your own encryption key, the request headers you provide in UploadPart\n and UploadPartCopy requests must match the headers you used in the CreateMultipartUpload request. A sketch of the SSE-KMS and SSE-C request headers follows after this list.

    \n
      \n
    • \n

      Use KMS keys (SSE-KMS) that include the Amazon Web Services managed key\n (aws/s3) and KMS customer managed keys stored in Key Management Service (KMS) –\n If you want Amazon Web Services to manage the keys used to encrypt data, specify the\n following headers in the request.

      \n
        \n
      • \n

        \n x-amz-server-side-encryption\n

        \n
      • \n
      • \n

        \n x-amz-server-side-encryption-aws-kms-key-id\n

        \n
      • \n
      • \n

        \n x-amz-server-side-encryption-context\n

        \n
      • \n
      \n \n
        \n
      • \n

        If you specify x-amz-server-side-encryption:aws:kms, but\n don't provide x-amz-server-side-encryption-aws-kms-key-id,\n Amazon S3 uses the Amazon Web Services managed key (aws/s3 key) in KMS to\n protect the data.

        \n
      • \n
      • \n

        To perform a multipart upload with encryption by using an Amazon Web Services KMS key, the requester\n must have permission to the kms:Decrypt and kms:GenerateDataKey*\n actions on the key. These permissions are required because Amazon S3 must decrypt and read data\n from the encrypted file parts before it completes the multipart upload. For more\n information, see Multipart upload API\n and permissions and Protecting data using\n server-side encryption with Amazon Web Services KMS in the\n Amazon S3 User Guide.

        \n
      • \n
      • \n

        If your Identity and Access Management (IAM) user or role is in the same Amazon Web Services account as the KMS key,\n then you must have these permissions on the key policy. If your IAM user or role is in a different account from the key, then you must have the permissions on both the key\n policy and your IAM user or role.

        \n
      • \n
      • \n

        All GET and PUT requests for an object\n protected by KMS fail if you don't make them by using Secure Sockets\n Layer (SSL), Transport Layer Security (TLS), or Signature Version\n 4. For information about configuring any of the officially supported Amazon Web Services\n SDKs and Amazon Web Services CLI, see Specifying the Signature Version in Request Authentication\n in the Amazon S3 User Guide.

        \n
      • \n
      \n
      \n

      For more information about server-side encryption with KMS keys\n (SSE-KMS), see Protecting Data\n Using Server-Side Encryption with KMS keys in the Amazon S3 User Guide.

      \n
    • \n
    • \n

      Use customer-provided encryption keys (SSE-C) – If you want to manage\n your own encryption keys, provide all the following headers in the\n request.

      \n
        \n
      • \n

        \n x-amz-server-side-encryption-customer-algorithm\n

        \n
      • \n
      • \n

        \n x-amz-server-side-encryption-customer-key\n

        \n
      • \n
      • \n

        \n x-amz-server-side-encryption-customer-key-MD5\n

        \n
      • \n
      \n

      For more information about server-side encryption with customer-provided\n encryption keys (SSE-C), see \n Protecting data using server-side encryption with customer-provided\n encryption keys (SSE-C) in the Amazon S3 User Guide.

      \n
    • \n
    \n
  • \n
  • \n

    \n Directory buckets - For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) is supported.

    \n
  • \n
\n
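Purely as an illustrative sketch of the two header groups above, using the AWS SDK for Go v2: the bucket name, object key, and KMS key ARN are placeholders, and the customer key is a zero-filled stand-in rather than a real secret.

package example

import (
    "context"
    "crypto/md5"
    "encoding/base64"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/s3"
    "github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// createWithSSEKMS maps to x-amz-server-side-encryption,
// x-amz-server-side-encryption-aws-kms-key-id, and
// x-amz-server-side-encryption-context.
func createWithSSEKMS(ctx context.Context, client *s3.Client) error {
    _, err := client.CreateMultipartUpload(ctx, &s3.CreateMultipartUploadInput{
        Bucket:                  aws.String("amzn-s3-demo-bucket"),
        Key:                     aws.String("example-object"),
        ServerSideEncryption:    types.ServerSideEncryptionAwsKms,
        SSEKMSKeyId:             aws.String("arn:aws:kms:us-east-1:111122223333:key/key-id"), // placeholder
        SSEKMSEncryptionContext: aws.String(base64.StdEncoding.EncodeToString([]byte(`{"purpose":"example"}`))),
    })
    return err
}

// createWithSSEC maps to the x-amz-server-side-encryption-customer-* headers.
func createWithSSEC(ctx context.Context, client *s3.Client) error {
    key := make([]byte, 32) // use a securely generated 256-bit key in practice
    keyMD5 := md5.Sum(key)
    _, err := client.CreateMultipartUpload(ctx, &s3.CreateMultipartUploadInput{
        Bucket:               aws.String("amzn-s3-demo-bucket"),
        Key:                  aws.String("example-object"),
        SSECustomerAlgorithm: aws.String("AES256"),
        SSECustomerKey:       aws.String(base64.StdEncoding.EncodeToString(key)),
        SSECustomerKeyMD5:    aws.String(base64.StdEncoding.EncodeToString(keyMD5[:])),
    })
    return err
}

For an SSE-C upload, the later UploadPart and UploadPartCopy requests must repeat the same three customer-key values.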
\n
HTTP Host header syntax
\n
\n

\n Directory buckets - The HTTP Host header syntax is \n Bucket_name.s3express-az_id.region.amazonaws.com.

\n
\n
\n

The following operations are related to CreateMultipartUpload:

\n ", "smithy.api#examples": [ { "title": "To initiate a multipart upload", @@ -17486,21 +19265,21 @@ "AbortDate": { "target": "com.amazonaws.s3#AbortDate", "traits": { - "smithy.api#documentation": "

If the bucket has a lifecycle rule configured with an action to abort incomplete\n multipart uploads and the prefix in the lifecycle rule matches the object name in the\n request, the response includes this header. The header indicates when the initiated\n multipart upload becomes eligible for an abort operation. For more information, see \n Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle\n Configuration.

\n

The response also includes the x-amz-abort-rule-id header that provides the\n ID of the lifecycle configuration rule that defines this action.

", + "smithy.api#documentation": "

If the bucket has a lifecycle rule configured with an action to abort incomplete\n multipart uploads and the prefix in the lifecycle rule matches the object name in the\n request, the response includes this header. The header indicates when the initiated\n multipart upload becomes eligible for an abort operation. For more information, see \n Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle\n Configuration in the Amazon S3 User Guide.

\n

The response also includes the x-amz-abort-rule-id header that provides the\n ID of the lifecycle configuration rule that defines the abort action.

\n \n

This functionality is not supported for directory buckets.

\n
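As a minimal sketch (assuming the AWS SDK for Go v2 and a placeholder bucket with a matching lifecycle rule), the x-amz-abort-date and x-amz-abort-rule-id response headers surface as the AbortDate and AbortRuleId fields of the operation output:

package example

import (
    "context"
    "log"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/s3"
)

// logAbortRule reports when an in-progress upload becomes eligible for the
// lifecycle abort action, if the bucket has such a rule configured.
func logAbortRule(ctx context.Context, client *s3.Client) error {
    out, err := client.CreateMultipartUpload(ctx, &s3.CreateMultipartUploadInput{
        Bucket: aws.String("amzn-s3-demo-bucket"), // placeholder
        Key:    aws.String("logs/example-object"),
    })
    if err != nil {
        return err
    }
    // AbortDate and AbortRuleId are populated only when a matching lifecycle
    // rule exists.
    if out.AbortDate != nil {
        log.Printf("upload %s becomes eligible for abort on %s (rule %s)",
            aws.ToString(out.UploadId), aws.ToTime(out.AbortDate), aws.ToString(out.AbortRuleId))
    }
    return nil
}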
", "smithy.api#httpHeader": "x-amz-abort-date" } }, "AbortRuleId": { "target": "com.amazonaws.s3#AbortRuleId", "traits": { - "smithy.api#documentation": "

This header is returned along with the x-amz-abort-date header. It\n identifies the applicable lifecycle configuration rule that defines the action to abort\n incomplete multipart uploads.

", + "smithy.api#documentation": "

This header is returned along with the x-amz-abort-date header. It\n identifies the applicable lifecycle configuration rule that defines the action to abort\n incomplete multipart uploads.

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-abort-rule-id" } }, "Bucket": { "target": "com.amazonaws.s3#BucketName", "traits": { - "smithy.api#documentation": "

The name of the bucket to which the multipart upload was initiated. Does not return the\n access point ARN or access point alias if used.

\n

When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.

\n

When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form \n AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide.

", + "smithy.api#documentation": "

The name of the bucket to which the multipart upload was initiated. Does not return the\n access point ARN or access point alias if used.

\n \n

Access points are not supported by directory buckets.

\n
", "smithy.api#xmlName": "Bucket" } }, @@ -17519,42 +19298,42 @@ "ServerSideEncryption": { "target": "com.amazonaws.s3#ServerSideEncryption", "traits": { - "smithy.api#documentation": "

The server-side encryption algorithm used when storing this object in Amazon S3 (for example,\n AES256, aws:kms).

", + "smithy.api#documentation": "

The server-side encryption algorithm used when you store this object in Amazon S3 (for example,\n AES256, aws:kms).

\n \n

For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) is supported.

\n
", "smithy.api#httpHeader": "x-amz-server-side-encryption" } }, "SSECustomerAlgorithm": { "target": "com.amazonaws.s3#SSECustomerAlgorithm", "traits": { - "smithy.api#documentation": "

If server-side encryption with a customer-provided encryption key was requested, the\n response will include this header confirming the encryption algorithm used.

", + "smithy.api#documentation": "

If server-side encryption with a customer-provided encryption key was requested, the\n response will include this header to confirm the encryption algorithm that's used.

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-server-side-encryption-customer-algorithm" } }, "SSECustomerKeyMD5": { "target": "com.amazonaws.s3#SSECustomerKeyMD5", "traits": { - "smithy.api#documentation": "

If server-side encryption with a customer-provided encryption key was requested, the\n response will include this header to provide round-trip message integrity verification of\n the customer-provided encryption key.

", + "smithy.api#documentation": "

If server-side encryption with a customer-provided encryption key was requested, the\n response will include this header to provide the round-trip message integrity verification of\n the customer-provided encryption key.

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-server-side-encryption-customer-key-MD5" } }, "SSEKMSKeyId": { "target": "com.amazonaws.s3#SSEKMSKeyId", "traits": { - "smithy.api#documentation": "

If present, specifies the ID of the Key Management Service (KMS) symmetric encryption customer managed key\n that was used for the object.

", + "smithy.api#documentation": "

If present, indicates the ID of the Key Management Service (KMS) symmetric encryption customer managed key\n that was used for the object.

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-server-side-encryption-aws-kms-key-id" } }, "SSEKMSEncryptionContext": { "target": "com.amazonaws.s3#SSEKMSEncryptionContext", "traits": { - "smithy.api#documentation": "

If present, specifies the Amazon Web Services KMS Encryption Context to use for object encryption. The\n value of this header is a base64-encoded UTF-8 string holding JSON with the encryption\n context key-value pairs.

", + "smithy.api#documentation": "

If present, indicates the Amazon Web Services KMS Encryption Context to use for object encryption. The\n value of this header is a base64-encoded UTF-8 string holding JSON with the encryption\n context key-value pairs.

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-server-side-encryption-context" } }, "BucketKeyEnabled": { "target": "com.amazonaws.s3#BucketKeyEnabled", "traits": { - "smithy.api#documentation": "

Indicates whether the multipart upload uses an S3 Bucket Key for server-side encryption\n with Key Management Service (KMS) keys (SSE-KMS).

", + "smithy.api#documentation": "

Indicates whether the multipart upload uses an S3 Bucket Key for server-side encryption\n with Key Management Service (KMS) keys (SSE-KMS).

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-server-side-encryption-bucket-key-enabled" } }, @@ -17583,14 +19362,14 @@ "ACL": { "target": "com.amazonaws.s3#ObjectCannedACL", "traits": { - "smithy.api#documentation": "

The canned ACL to apply to the object.

\n

This action is not supported by Amazon S3 on Outposts.

", + "smithy.api#documentation": "

The canned ACL to apply to the object. Amazon S3 supports a set of\n predefined ACLs, known as canned ACLs. Each canned ACL\n has a predefined set of grantees and permissions. For more information, see\n Canned\n ACL in the Amazon S3 User Guide.

\n

By default, all objects are private. Only the owner has full access\n control. When uploading an object, you can grant access permissions to individual\n Amazon Web Services accounts or to predefined groups defined by Amazon S3. These permissions are then\n added to the access control list (ACL) on the new object. For more information, see\n Using ACLs. One way to\n grant the permissions using the request headers is to specify a canned ACL with the x-amz-acl request header.

\n \n
    \n
  • \n

    This functionality is not supported for directory buckets.

    \n
  • \n
  • \n

    This functionality is not supported for Amazon S3 on Outposts.

    \n
  • \n
\n
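A minimal sketch of applying a canned ACL with the AWS SDK for Go v2 (bucket and key names are placeholders):

package example

import (
    "context"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/s3"
    "github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// createWithCannedACL applies a predefined ACL via the x-amz-acl header.
func createWithCannedACL(ctx context.Context, client *s3.Client) error {
    _, err := client.CreateMultipartUpload(ctx, &s3.CreateMultipartUploadInput{
        Bucket: aws.String("amzn-s3-demo-bucket"), // placeholder
        Key:    aws.String("example-object"),
        ACL:    types.ObjectCannedACLBucketOwnerFullControl,
    })
    return err
}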
", "smithy.api#httpHeader": "x-amz-acl" } }, "Bucket": { "target": "com.amazonaws.s3#BucketName", "traits": { - "smithy.api#documentation": "

The name of the bucket in which to initiate the upload.

\n

When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.

\n

When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form \n AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide.

", + "smithy.api#documentation": "

The name of the bucket where the multipart upload is initiated and where the object is uploaded.

\n

\n Directory buckets - When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format \n Bucket_name.s3express-az_id.region.amazonaws.com. Path-style requests are not supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must follow the format \n bucket_base_name--az-id--x-s3 (for example, \n DOC-EXAMPLE-BUCKET--usw2-az2--x-s3). For information about bucket naming\n restrictions, see Directory bucket naming\n rules in the Amazon S3 User Guide.

\n

\n Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.

\n \n

Access points and Object Lambda access points are not supported by directory buckets.

\n
\n

\n S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form \n AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide.

", "smithy.api#httpLabel": {}, "smithy.api#required": {}, "smithy.rules#contextParam": { @@ -17615,14 +19394,14 @@ "ContentEncoding": { "target": "com.amazonaws.s3#ContentEncoding", "traits": { - "smithy.api#documentation": "

Specifies what content encodings have been applied to the object and thus what decoding\n mechanisms must be applied to obtain the media-type referenced by the Content-Type header\n field.

", + "smithy.api#documentation": "

Specifies what content encodings have been applied to the object and thus what decoding\n mechanisms must be applied to obtain the media-type referenced by the Content-Type header\n field.

\n \n

For directory buckets, only the aws-chunked value is supported in this header field.

\n
", "smithy.api#httpHeader": "Content-Encoding" } }, "ContentLanguage": { "target": "com.amazonaws.s3#ContentLanguage", "traits": { - "smithy.api#documentation": "

The language the content is in.

", + "smithy.api#documentation": "

The language that the content is in.

", "smithy.api#httpHeader": "Content-Language" } }, @@ -17643,28 +19422,28 @@ "GrantFullControl": { "target": "com.amazonaws.s3#GrantFullControl", "traits": { - "smithy.api#documentation": "

Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.

\n

This action is not supported by Amazon S3 on Outposts.

", + "smithy.api#documentation": "

Specify access permissions explicitly to give the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.

\n

By default, all objects are private. Only the owner has full access\n control. When uploading an object, you can use this header to explicitly grant access\n permissions to specific Amazon Web Services accounts or groups.\n This header maps to specific permissions that Amazon S3 supports in an ACL. For\n more information, see Access Control List (ACL)\n Overview in the Amazon S3 User Guide.

\n

You specify each grantee as a type=value pair, where the type is one of\n the following:

\n
    \n
  • \n

    \n id – if the value specified is the canonical user ID\n of an Amazon Web Services account

    \n
  • \n
  • \n

    \n uri – if you are granting permissions to a predefined\n group

    \n
  • \n
  • \n

    \n emailAddress – if the value specified is the email\n address of an Amazon Web Services account

    \n \n

    Using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:

    \n
      \n
    • \n

      US East (N. Virginia)

      \n
    • \n
    • \n

      US West (N. California)

      \n
    • \n
    • \n

      US West (Oregon)

      \n
    • \n
    • \n

      Asia Pacific (Singapore)

      \n
    • \n
    • \n

      Asia Pacific (Sydney)

      \n
    • \n
    • \n

      Asia Pacific (Tokyo)

      \n
    • \n
    • \n

      Europe (Ireland)

      \n
    • \n
    • \n

      South America (São Paulo)

      \n
    • \n
    \n

    For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.

    \n
    \n
  • \n
\n

For example, the following x-amz-grant-read header grants the Amazon Web Services accounts identified by account IDs permissions to read object data and its metadata:

\n

\n x-amz-grant-read: id=\"11112222333\", id=\"444455556666\" \n

\n \n
    \n
  • \n

    This functionality is not supported for directory buckets.

    \n
  • \n
  • \n

    This functionality is not supported for Amazon S3 on Outposts.

    \n
  • \n
\n
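A minimal sketch of the explicit-grant headers with the AWS SDK for Go v2; the canonical user ID is a placeholder, and the group URI is the predefined AllUsers group:

package example

import (
    "context"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/s3"
)

// createWithGrants starts a multipart upload while granting explicit
// permissions on the resulting object.
func createWithGrants(ctx context.Context, client *s3.Client) error {
    _, err := client.CreateMultipartUpload(ctx, &s3.CreateMultipartUploadInput{
        Bucket:           aws.String("amzn-s3-demo-bucket"),
        Key:              aws.String("example-object"),
        GrantFullControl: aws.String(`id="ExampleCanonicalUserID"`), // placeholder canonical user ID
        GrantRead:        aws.String(`uri="http://acs.amazonaws.com/groups/global/AllUsers"`),
    })
    return err
}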
", "smithy.api#httpHeader": "x-amz-grant-full-control" } }, "GrantRead": { "target": "com.amazonaws.s3#GrantRead", "traits": { - "smithy.api#documentation": "

Allows grantee to read the object data and its metadata.

\n

This action is not supported by Amazon S3 on Outposts.

", + "smithy.api#documentation": "

Specify access permissions explicitly to allow the grantee to read the object data and its metadata.

\n

By default, all objects are private. Only the owner has full access\n control. When uploading an object, you can use this header to explicitly grant access\n permissions to specific Amazon Web Services accounts or groups.\n This header maps to specific permissions that Amazon S3 supports in an ACL. For\n more information, see Access Control List (ACL)\n Overview in the Amazon S3 User Guide.

\n

You specify each grantee as a type=value pair, where the type is one of\n the following:

\n
    \n
  • \n

    \n id – if the value specified is the canonical user ID\n of an Amazon Web Services account

    \n
  • \n
  • \n

    \n uri – if you are granting permissions to a predefined\n group

    \n
  • \n
  • \n

    \n emailAddress – if the value specified is the email\n address of an Amazon Web Services account

    \n \n

    Using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:

    \n
      \n
    • \n

      US East (N. Virginia)

      \n
    • \n
    • \n

      US West (N. California)

      \n
    • \n
    • \n

      US West (Oregon)

      \n
    • \n
    • \n

      Asia Pacific (Singapore)

      \n
    • \n
    • \n

      Asia Pacific (Sydney)

      \n
    • \n
    • \n

      Asia Pacific (Tokyo)

      \n
    • \n
    • \n

      Europe (Ireland)

      \n
    • \n
    • \n

      South America (São Paulo)

      \n
    • \n
    \n

    For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.

    \n
    \n
  • \n
\n

For example, the following x-amz-grant-read header grants the Amazon Web Services accounts identified by account IDs permissions to read object data and its metadata:

\n

\n x-amz-grant-read: id=\"11112222333\", id=\"444455556666\" \n

\n \n
    \n
  • \n

    This functionality is not supported for directory buckets.

    \n
  • \n
  • \n

    This functionality is not supported for Amazon S3 on Outposts.

    \n
  • \n
\n
", "smithy.api#httpHeader": "x-amz-grant-read" } }, "GrantReadACP": { "target": "com.amazonaws.s3#GrantReadACP", "traits": { - "smithy.api#documentation": "

Allows grantee to read the object ACL.

\n

This action is not supported by Amazon S3 on Outposts.

", + "smithy.api#documentation": "

Specify access permissions explicitly to allow the grantee to read the object ACL.

\n

By default, all objects are private. Only the owner has full access\n control. When uploading an object, you can use this header to explicitly grant access\n permissions to specific Amazon Web Services accounts or groups.\n This header maps to specific permissions that Amazon S3 supports in an ACL. For\n more information, see Access Control List (ACL)\n Overview in the Amazon S3 User Guide.

\n

You specify each grantee as a type=value pair, where the type is one of\n the following:

\n
    \n
  • \n

    \n id – if the value specified is the canonical user ID\n of an Amazon Web Services account

    \n
  • \n
  • \n

    \n uri – if you are granting permissions to a predefined\n group

    \n
  • \n
  • \n

    \n emailAddress – if the value specified is the email\n address of an Amazon Web Services account

    \n \n

    Using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:

    \n
      \n
    • \n

      US East (N. Virginia)

      \n
    • \n
    • \n

      US West (N. California)

      \n
    • \n
    • \n

      US West (Oregon)

      \n
    • \n
    • \n

      Asia Pacific (Singapore)

      \n
    • \n
    • \n

      Asia Pacific (Sydney)

      \n
    • \n
    • \n

      Asia Pacific (Tokyo)

      \n
    • \n
    • \n

      Europe (Ireland)

      \n
    • \n
    • \n

      South America (São Paulo)

      \n
    • \n
    \n

    For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.

    \n
    \n
  • \n
\n

For example, the following x-amz-grant-read header grants the Amazon Web Services accounts identified by account IDs permissions to read object data and its metadata:

\n

\n x-amz-grant-read: id=\"11112222333\", id=\"444455556666\" \n

\n \n
    \n
  • \n

    This functionality is not supported for directory buckets.

    \n
  • \n
  • \n

    This functionality is not supported for Amazon S3 on Outposts.

    \n
  • \n
\n
", "smithy.api#httpHeader": "x-amz-grant-read-acp" } }, "GrantWriteACP": { "target": "com.amazonaws.s3#GrantWriteACP", "traits": { - "smithy.api#documentation": "

Allows grantee to write the ACL for the applicable object.

\n

This action is not supported by Amazon S3 on Outposts.

", + "smithy.api#documentation": "

Specify access permissions explicitly to allow the grantee to write the ACL for the applicable object.

\n

By default, all objects are private. Only the owner has full access\n control. When uploading an object, you can use this header to explicitly grant access\n permissions to specific Amazon Web Services accounts or groups.\n This header maps to specific permissions that Amazon S3 supports in an ACL. For\n more information, see Access Control List (ACL)\n Overview in the Amazon S3 User Guide.

\n

You specify each grantee as a type=value pair, where the type is one of\n the following:

\n
    \n
  • \n

    \n id – if the value specified is the canonical user ID\n of an Amazon Web Services account

    \n
  • \n
  • \n

    \n uri – if you are granting permissions to a predefined\n group

    \n
  • \n
  • \n

    \n emailAddress – if the value specified is the email\n address of an Amazon Web Services account

    \n \n

    Using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:

    \n
      \n
    • \n

      US East (N. Virginia)

      \n
    • \n
    • \n

      US West (N. California)

      \n
    • \n
    • \n

      US West (Oregon)

      \n
    • \n
    • \n

      Asia Pacific (Singapore)

      \n
    • \n
    • \n

      Asia Pacific (Sydney)

      \n
    • \n
    • \n

      Asia Pacific (Tokyo)

      \n
    • \n
    • \n

      Europe (Ireland)

      \n
    • \n
    • \n

      South America (São Paulo)

      \n
    • \n
    \n

    For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.

    \n
    \n
  • \n
\n

For example, the following x-amz-grant-read header grants the Amazon Web Services accounts identified by account IDs permissions to read object data and its metadata:

\n

\n x-amz-grant-read: id=\"11112222333\", id=\"444455556666\" \n

\n \n
    \n
  • \n

    This functionality is not supported for directory buckets.

    \n
  • \n
  • \n

    This functionality is not supported for Amazon S3 on Outposts.

    \n
  • \n
\n
", "smithy.api#httpHeader": "x-amz-grant-write-acp" } }, @@ -17689,63 +19468,63 @@ "ServerSideEncryption": { "target": "com.amazonaws.s3#ServerSideEncryption", "traits": { - "smithy.api#documentation": "

The server-side encryption algorithm used when storing this object in Amazon S3 (for example,\n AES256, aws:kms).

", + "smithy.api#documentation": "

The server-side encryption algorithm used when you store this object in Amazon S3 (for example,\n AES256, aws:kms).

\n \n

For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) is supported.

\n
", "smithy.api#httpHeader": "x-amz-server-side-encryption" } }, "StorageClass": { "target": "com.amazonaws.s3#StorageClass", "traits": { - "smithy.api#documentation": "

By default, Amazon S3 uses the STANDARD Storage Class to store newly created objects. The\n STANDARD storage class provides high durability and high availability. Depending on\n performance needs, you can specify a different Storage Class. Amazon S3 on Outposts only uses\n the OUTPOSTS Storage Class. For more information, see Storage Classes in the\n Amazon S3 User Guide.

", + "smithy.api#documentation": "

By default, Amazon S3 uses the STANDARD Storage Class to store newly created objects. The\n STANDARD storage class provides high durability and high availability. Depending on\n performance needs, you can specify a different Storage Class. For more information, see Storage Classes in the\n Amazon S3 User Guide.

\n \n
    \n
  • \n

    For directory buckets, only the S3 Express One Zone storage class is supported to store newly created objects.

    \n
  • \n
  • \n

    Amazon S3 on Outposts only uses\n the OUTPOSTS Storage Class.

    \n
  • \n
\n
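A minimal sketch of overriding the default storage class with the AWS SDK for Go v2 (bucket and key names are placeholders):

package example

import (
    "context"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/s3"
    "github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// createWithStorageClass overrides the default STANDARD storage class via the
// x-amz-storage-class header.
func createWithStorageClass(ctx context.Context, client *s3.Client) error {
    _, err := client.CreateMultipartUpload(ctx, &s3.CreateMultipartUploadInput{
        Bucket:       aws.String("amzn-s3-demo-bucket"), // placeholder
        Key:          aws.String("archive/example-object"),
        StorageClass: types.StorageClassStandardIa,
    })
    return err
}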
", "smithy.api#httpHeader": "x-amz-storage-class" } }, "WebsiteRedirectLocation": { "target": "com.amazonaws.s3#WebsiteRedirectLocation", "traits": { - "smithy.api#documentation": "

If the bucket is configured as a website, redirects requests for this object to another\n object in the same bucket or to an external URL. Amazon S3 stores the value of this header in\n the object metadata.

", + "smithy.api#documentation": "

If the bucket is configured as a website, redirects requests for this object to another\n object in the same bucket or to an external URL. Amazon S3 stores the value of this header in\n the object metadata.

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-website-redirect-location" } }, "SSECustomerAlgorithm": { "target": "com.amazonaws.s3#SSECustomerAlgorithm", "traits": { - "smithy.api#documentation": "

Specifies the algorithm to use to when encrypting the object (for example,\n AES256).

", + "smithy.api#documentation": "

Specifies the algorithm to use when encrypting the object (for example,\n AES256).

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-server-side-encryption-customer-algorithm" } }, "SSECustomerKey": { "target": "com.amazonaws.s3#SSECustomerKey", "traits": { - "smithy.api#documentation": "

Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This\n value is used to store the object and then it is discarded; Amazon S3 does not store the\n encryption key. The key must be appropriate for use with the algorithm specified in the\n x-amz-server-side-encryption-customer-algorithm header.

", + "smithy.api#documentation": "

Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This\n value is used to store the object and then it is discarded; Amazon S3 does not store the\n encryption key. The key must be appropriate for use with the algorithm specified in the\n x-amz-server-side-encryption-customer-algorithm header.

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-server-side-encryption-customer-key" } }, "SSECustomerKeyMD5": { "target": "com.amazonaws.s3#SSECustomerKeyMD5", "traits": { - "smithy.api#documentation": "

Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses\n this header for a message integrity check to ensure that the encryption key was transmitted\n without error.

", + "smithy.api#documentation": "

Specifies the 128-bit MD5 digest of the customer-provided encryption key according to RFC 1321. Amazon S3 uses\n this header for a message integrity check to ensure that the encryption key was transmitted\n without error.

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-server-side-encryption-customer-key-MD5" } }, "SSEKMSKeyId": { "target": "com.amazonaws.s3#SSEKMSKeyId", "traits": { - "smithy.api#documentation": "

Specifies the ID (Key ID, Key ARN, or Key Alias) of the symmetric encryption customer managed key to use for object encryption.\n All GET and PUT requests for an object protected by KMS will fail if they're not made via\n SSL or using SigV4. For information about configuring any of the officially supported Amazon Web Services\n SDKs and Amazon Web Services CLI, see Specifying the Signature Version in Request Authentication\n in the Amazon S3 User Guide.

", + "smithy.api#documentation": "

Specifies the ID (Key ID, Key ARN, or Key Alias) of the symmetric encryption customer managed key to use for object encryption.

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-server-side-encryption-aws-kms-key-id" } }, "SSEKMSEncryptionContext": { "target": "com.amazonaws.s3#SSEKMSEncryptionContext", "traits": { - "smithy.api#documentation": "

Specifies the Amazon Web Services KMS Encryption Context to use for object encryption. The value of\n this header is a base64-encoded UTF-8 string holding JSON with the encryption context\n key-value pairs.

", + "smithy.api#documentation": "

Specifies the Amazon Web Services KMS Encryption Context to use for object encryption. The value of\n this header is a base64-encoded UTF-8 string holding JSON with the encryption context\n key-value pairs.

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-server-side-encryption-context" } }, "BucketKeyEnabled": { "target": "com.amazonaws.s3#BucketKeyEnabled", "traits": { - "smithy.api#documentation": "

Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with\n server-side encryption using Key Management Service (KMS) keys (SSE-KMS). Setting this header to\n true causes Amazon S3 to use an S3 Bucket Key for object encryption with\n SSE-KMS.

\n

Specifying this header with an object action doesn’t affect bucket-level settings for S3\n Bucket Key.

", + "smithy.api#documentation": "

Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with\n server-side encryption using Key Management Service (KMS) keys (SSE-KMS). Setting this header to\n true causes Amazon S3 to use an S3 Bucket Key for object encryption with\n SSE-KMS.

\n

Specifying this header with an object action doesn’t affect bucket-level settings for S3\n Bucket Key.

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-server-side-encryption-bucket-key-enabled" } }, @@ -17758,42 +19537,42 @@ "Tagging": { "target": "com.amazonaws.s3#TaggingHeader", "traits": { - "smithy.api#documentation": "

The tag-set for the object. The tag-set must be encoded as URL Query parameters.

", + "smithy.api#documentation": "

The tag-set for the object. The tag-set must be encoded as URL Query parameters.

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-tagging" } }, "ObjectLockMode": { "target": "com.amazonaws.s3#ObjectLockMode", "traits": { - "smithy.api#documentation": "

Specifies the Object Lock mode that you want to apply to the uploaded object.

", + "smithy.api#documentation": "

Specifies the Object Lock mode that you want to apply to the uploaded object.

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-object-lock-mode" } }, "ObjectLockRetainUntilDate": { "target": "com.amazonaws.s3#ObjectLockRetainUntilDate", "traits": { - "smithy.api#documentation": "

Specifies the date and time when you want the Object Lock to expire.

", + "smithy.api#documentation": "

Specifies the date and time when you want the Object Lock to expire.

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-object-lock-retain-until-date" } }, "ObjectLockLegalHoldStatus": { "target": "com.amazonaws.s3#ObjectLockLegalHoldStatus", "traits": { - "smithy.api#documentation": "

Specifies whether you want to apply a legal hold to the uploaded object.

", + "smithy.api#documentation": "

Specifies whether you want to apply a legal hold to the uploaded object.

\n \n

This functionality is not supported for directory buckets.

\n
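A minimal sketch of the three Object Lock headers together, using the AWS SDK for Go v2; the bucket name is a placeholder and must have Object Lock enabled, and the 30-day retention window is arbitrary:

package example

import (
    "context"
    "time"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/s3"
    "github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// createWithObjectLock sets the Object Lock mode, retain-until date, and legal
// hold status on the uploaded object.
func createWithObjectLock(ctx context.Context, client *s3.Client) error {
    _, err := client.CreateMultipartUpload(ctx, &s3.CreateMultipartUploadInput{
        Bucket:                    aws.String("amzn-s3-demo-bucket"), // placeholder
        Key:                       aws.String("records/example-object"),
        ObjectLockMode:            types.ObjectLockModeGovernance,
        ObjectLockRetainUntilDate: aws.Time(time.Now().AddDate(0, 0, 30)),
        ObjectLockLegalHoldStatus: types.ObjectLockLegalHoldStatusOn,
    })
    return err
}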
", "smithy.api#httpHeader": "x-amz-object-lock-legal-hold" } }, "ExpectedBucketOwner": { "target": "com.amazonaws.s3#AccountId", "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

", + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } }, "ChecksumAlgorithm": { "target": "com.amazonaws.s3#ChecksumAlgorithm", "traits": { - "smithy.api#documentation": "

Indicates the algorithm you want Amazon S3 to use to create the checksum for the object. For more information, see\n Checking object integrity in\n the Amazon S3 User Guide.

", + "smithy.api#documentation": "

Indicates the algorithm that you want Amazon S3 to use to create the checksum for the object. For more information, see\n Checking object integrity in\n the Amazon S3 User Guide.
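A minimal sketch of selecting a checksum algorithm with the AWS SDK for Go v2 (bucket and key names are placeholders):

package example

import (
    "context"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/s3"
    "github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// createWithChecksum asks Amazon S3 to validate parts against SHA-256; each
// subsequent UploadPart call must then send the matching
// x-amz-checksum-sha256 value.
func createWithChecksum(ctx context.Context, client *s3.Client) error {
    _, err := client.CreateMultipartUpload(ctx, &s3.CreateMultipartUploadInput{
        Bucket:            aws.String("amzn-s3-demo-bucket"), // placeholder
        Key:               aws.String("example-object"),
        ChecksumAlgorithm: types.ChecksumAlgorithmSha256,
    })
    return err
}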

", "smithy.api#httpHeader": "x-amz-checksum-algorithm" } } @@ -17802,9 +19581,89 @@ "smithy.api#input": {} } }, + "com.amazonaws.s3#CreateSession": { + "type": "operation", + "input": { + "target": "com.amazonaws.s3#CreateSessionRequest" + }, + "output": { + "target": "com.amazonaws.s3#CreateSessionOutput" + }, + "errors": [ + { + "target": "com.amazonaws.s3#NoSuchBucket" + } + ], + "traits": { + "smithy.api#documentation": "

Creates a session that establishes temporary security credentials to support fast authentication and authorization for the Zonal endpoint APIs on directory buckets. \n For more information about Zonal endpoint APIs that include the Availability Zone in the request endpoint, see \n S3 Express One Zone APIs in the Amazon S3 User Guide. \n

\n

To make Zonal endpoint API requests on a directory bucket, use the CreateSession\n API operation. Specifically, you grant s3express:CreateSession permission to a\n bucket in a bucket policy or an IAM identity-based policy. Then, you use IAM credentials to make the\n CreateSession API request on the bucket, which returns temporary security\n credentials that include the access key ID, secret access key, session token, and\n expiration. These credentials have associated permissions to access the Zonal endpoint APIs. After\n the session is created, you don’t need to use other policies to grant permissions to each\n Zonal endpoint API individually. Instead, in your Zonal endpoint API requests, you sign your requests by\n applying the temporary security credentials of the session to the request headers and\n following the SigV4 protocol for authentication. You also apply the session token to the\n x-amz-s3session-token request header for authorization. Temporary security\n credentials are scoped to the bucket and expire after 5 minutes. After the expiration time,\n any calls that you make with those credentials will fail. You must use IAM credentials\n again to make a CreateSession API request that generates a new set of\n temporary credentials for use. Temporary credentials cannot be extended or refreshed beyond\n the original specified interval.
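As a minimal sketch of the flow just described, using the AWS SDK for Go v2: the directory bucket name is a placeholder, and the SessionMode and SessionCredentials field names are assumed from the generated shapes in this model, so they may differ slightly by SDK release. In normal use the SDK performs this call and the refresh for you.

package example

import (
    "context"
    "log"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/s3"
    "github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// createSession requests temporary credentials for a directory bucket.
func createSession(ctx context.Context, client *s3.Client) error {
    out, err := client.CreateSession(ctx, &s3.CreateSessionInput{
        Bucket:      aws.String("amzn-s3-demo-bucket--usw2-az1--x-s3"), // placeholder directory bucket
        SessionMode: types.SessionModeReadOnly,
    })
    if err != nil {
        return err
    }
    // Credential field names are assumed from the generated SessionCredentials shape.
    creds := out.Credentials
    log.Printf("session key %s expires at %s",
        aws.ToString(creds.AccessKeyId), aws.ToTime(creds.Expiration))
    return nil
}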

\n

If you use Amazon Web Services SDKs, the SDKs handle the session token refreshes automatically to avoid\n service interruptions when a session expires. We recommend that you use the Amazon Web Services SDKs to\n initiate and manage requests to the CreateSession API. For more information, see Performance guidelines and design patterns in the\n Amazon S3 User Guide.

\n \n
    \n
  • \n

    You must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the\n Amazon S3 User Guide.

    \n
  • \n
  • \n

    \n \n CopyObject API operation - Unlike other Zonal endpoint APIs, the CopyObject API operation doesn't use the temporary security credentials returned from the CreateSession API operation for authentication and authorization. For information about authentication and authorization of the CopyObject API operation on directory buckets, see CopyObject.

    \n
  • \n
  • \n

    \n \n HeadBucket API operation - Unlike other Zonal endpoint APIs, the HeadBucket API operation doesn't use the temporary security credentials returned from the CreateSession API operation for authentication and authorization. For information about authentication and authorization of the HeadBucket API operation on directory buckets, see HeadBucket.

    \n
  • \n
\n
\n
\n
Permissions
\n
\n

To obtain temporary security credentials, you must create a bucket policy or an IAM identity-based policy that\n grants s3express:CreateSession permission to the bucket. In a\n policy, you can use the s3express:SessionMode condition key to\n control who can create a ReadWrite or ReadOnly session.\n For more information about ReadWrite or ReadOnly\n sessions, see \n x-amz-create-session-mode\n . For example policies, see\n Example bucket policies for S3 Express One Zone and Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone in the\n Amazon S3 User Guide.

\n

To grant cross-account access to Zonal endpoint APIs, the bucket policy should also grant both accounts the s3express:CreateSession permission.

\n
\n
HTTP Host header syntax
\n
\n

\n Directory buckets - The HTTP Host header syntax is \n Bucket_name.s3express-az_id.region.amazonaws.com.

\n
\n
", + "smithy.api#http": { + "method": "GET", + "uri": "/{Bucket}?session", + "code": 200 + }, + "smithy.rules#staticContextParams": { + "DisableS3ExpressSessionAuth": { + "value": true + } + } + } + }, + "com.amazonaws.s3#CreateSessionOutput": { + "type": "structure", + "members": { + "Credentials": { + "target": "com.amazonaws.s3#SessionCredentials", + "traits": { + "smithy.api#documentation": "

The established temporary security credentials for the created session.

", + "smithy.api#required": {}, + "smithy.api#xmlName": "Credentials" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.s3#CreateSessionRequest": { + "type": "structure", + "members": { + "SessionMode": { + "target": "com.amazonaws.s3#SessionMode", + "traits": { + "smithy.api#documentation": "

Specifies the mode of the session that will be created, either ReadWrite or\n ReadOnly. By default, a ReadWrite session is created. A\n ReadWrite session is capable of executing all the Zonal endpoint APIs on a\n directory bucket. A ReadOnly session is constrained to execute the following\n Zonal endpoint APIs: GetObject, HeadObject, ListObjectsV2,\n GetObjectAttributes, ListParts, and\n ListMultipartUploads.

", + "smithy.api#httpHeader": "x-amz-create-session-mode" + } + }, + "Bucket": { + "target": "com.amazonaws.s3#BucketName", + "traits": { + "smithy.api#documentation": "

The name of the bucket that you create a session for.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {}, + "smithy.rules#contextParam": { + "name": "Bucket" + } + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, "com.amazonaws.s3#CreationDate": { "type": "timestamp" }, + "com.amazonaws.s3#DataRedundancy": { + "type": "enum", + "members": { + "SingleAvailabilityZone": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SingleAvailabilityZone" + } + } + } + }, "com.amazonaws.s3#Date": { "type": "timestamp", "traits": { @@ -17849,7 +19708,7 @@ "Objects": { "target": "com.amazonaws.s3#ObjectIdentifierList", "traits": { - "smithy.api#documentation": "

The object to delete.

", + "smithy.api#documentation": "

The object to delete.

\n \n

\n Directory buckets - For directory buckets, an object that's composed entirely of \n whitespace characters is not supported by the DeleteObjects API operation. The request will receive a 400 Bad Request error \n and none of the objects in the request will be deleted.

\n
", "smithy.api#required": {}, "smithy.api#xmlFlattened": {}, "smithy.api#xmlName": "Object" @@ -17858,7 +19717,7 @@ "Quiet": { "target": "com.amazonaws.s3#Quiet", "traits": { - "smithy.api#documentation": "

Element to enable quiet mode for the request. When you add this element, you must set\n its value to true.

" + "smithy.api#documentation": "

Element to enable quiet mode for the request. When you add this element, you must set\n its value to true.
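A minimal sketch of building this structure for a DeleteObjects call with the AWS SDK for Go v2; the bucket and keys are placeholders. Setting the Quiet element (a plain bool in SDK releases contemporary with this model, a *bool in later ones) would suppress the per-object success entries in the response.

package example

import (
    "context"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/s3"
    "github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// deleteTwoObjects deletes two keys in a single DeleteObjects request.
func deleteTwoObjects(ctx context.Context, client *s3.Client) error {
    _, err := client.DeleteObjects(ctx, &s3.DeleteObjectsInput{
        Bucket: aws.String("amzn-s3-demo-bucket"), // placeholder
        Delete: &types.Delete{
            Objects: []types.ObjectIdentifier{
                {Key: aws.String("logs/2023-11-01.txt")},
                {Key: aws.String("logs/2023-11-02.txt")},
            },
        },
    })
    return err
}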

" } } }, @@ -17875,7 +19734,7 @@ "target": "smithy.api#Unit" }, "traits": { - "smithy.api#documentation": "

Deletes the S3 bucket. All objects (including all object versions and delete markers) in\n the bucket must be deleted before the bucket itself can be deleted.

\n

The following operations are related to DeleteBucket:

\n ", + "smithy.api#documentation": "

Deletes the S3 bucket. All objects (including all object versions and delete markers) in\n the bucket must be deleted before the bucket itself can be deleted.
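A minimal sketch of the call with the AWS SDK for Go v2, assuming the placeholder bucket has already been emptied:

package example

import (
    "context"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/s3"
)

// deleteBucket removes an already-emptied bucket.
func deleteBucket(ctx context.Context, client *s3.Client) error {
    _, err := client.DeleteBucket(ctx, &s3.DeleteBucketInput{
        Bucket: aws.String("amzn-s3-demo-bucket"), // placeholder
    })
    return err
}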

\n \n
    \n
  • \n

    \n Directory buckets - If multipart uploads in a directory bucket are in progress, you can't delete the bucket until all the in-progress multipart uploads are aborted or completed.

    \n
  • \n
  • \n

    \n Directory buckets - For directory buckets, you must make requests for this API operation to the Regional endpoint. These endpoints support path-style requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name\n . Virtual-hosted-style requests aren't supported. \nFor more information, see Regional and Zonal endpoints in the\n Amazon S3 User Guide.

    \n
  • \n
\n
\n
\n
Permissions
\n
\n
    \n
  • \n

    \n General purpose bucket permissions - You must have the s3:DeleteBucket permission on the specified bucket in a policy.

    \n
  • \n
  • \n

    \n Directory bucket permissions - You must have the s3express:DeleteBucket permission in an IAM identity-based policy instead of a bucket policy. Cross-account access to this API operation isn't supported. This operation can only be performed by the Amazon Web Services account that owns the resource. For more information about directory bucket policies and permissions, see Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone in the Amazon S3 User Guide.

    \n
  • \n
\n
\n
HTTP Host header syntax
\n
\n

\n Directory buckets - The HTTP Host header syntax is s3express-control.region.amazonaws.com.

\n
\n
\n

The following operations are related to DeleteBucket:

\n ", "smithy.api#examples": [ { "title": "To delete a bucket", @@ -17889,6 +19748,11 @@ "method": "DELETE", "uri": "/{Bucket}", "code": 204 + }, + "smithy.rules#staticContextParams": { + "UseS3ExpressControlEndpoint": { + "value": true + } } } }, @@ -17901,11 +19765,16 @@ "target": "smithy.api#Unit" }, "traits": { - "smithy.api#documentation": "

Deletes an analytics configuration for the bucket (specified by the analytics\n configuration ID).

\n

To use this operation, you must have permissions to perform the\n s3:PutAnalyticsConfiguration action. The bucket owner has this permission\n by default. The bucket owner can grant this permission to others. For more information\n about permissions, see Permissions Related to Bucket Subresource Operations and Managing\n Access Permissions to Your Amazon S3 Resources.

\n

For information about the Amazon S3 analytics feature, see Amazon S3 Analytics – Storage Class\n Analysis.

\n

The following operations are related to\n DeleteBucketAnalyticsConfiguration:

\n ", + "smithy.api#documentation": "\n

This operation is not supported by directory buckets.

\n
\n

Deletes an analytics configuration for the bucket (specified by the analytics\n configuration ID).

\n

To use this operation, you must have permissions to perform the\n s3:PutAnalyticsConfiguration action. The bucket owner has this permission\n by default. The bucket owner can grant this permission to others. For more information\n about permissions, see Permissions Related to Bucket Subresource Operations and Managing\n Access Permissions to Your Amazon S3 Resources.

\n

For information about the Amazon S3 analytics feature, see Amazon S3 Analytics – Storage Class\n Analysis.

\n

The following operations are related to\n DeleteBucketAnalyticsConfiguration:

\n ", "smithy.api#http": { "method": "DELETE", "uri": "/{Bucket}?analytics", "code": 204 + }, + "smithy.rules#staticContextParams": { + "UseS3ExpressControlEndpoint": { + "value": true + } } } }, @@ -17934,7 +19803,7 @@ "ExpectedBucketOwner": { "target": "com.amazonaws.s3#AccountId", "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

", + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } } @@ -17952,7 +19821,7 @@ "target": "smithy.api#Unit" }, "traits": { - "smithy.api#documentation": "

Deletes the cors configuration information set for the bucket.

\n

To use this operation, you must have permission to perform the\n s3:PutBucketCORS action. The bucket owner has this permission by default\n and can grant this permission to others.

\n

For information about cors, see Enabling Cross-Origin Resource Sharing in\n the Amazon S3 User Guide.

\n

\n Related Resources\n

\n ", + "smithy.api#documentation": "\n

This operation is not supported by directory buckets.

\n
\n

Deletes the cors configuration information set for the bucket.

\n

To use this operation, you must have permission to perform the\n s3:PutBucketCORS action. The bucket owner has this permission by default\n and can grant this permission to others.

\n

For information about cors, see Enabling Cross-Origin Resource Sharing in\n the Amazon S3 User Guide.

\n

\n Related Resources\n

\n ", "smithy.api#examples": [ { "title": "To delete cors configuration on a bucket.", @@ -17966,6 +19835,11 @@ "method": "DELETE", "uri": "/{Bucket}?cors", "code": 204 + }, + "smithy.rules#staticContextParams": { + "UseS3ExpressControlEndpoint": { + "value": true + } } } }, @@ -17986,7 +19860,7 @@ "ExpectedBucketOwner": { "target": "com.amazonaws.s3#AccountId", "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

", + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } } @@ -18004,11 +19878,16 @@ "target": "smithy.api#Unit" }, "traits": { - "smithy.api#documentation": "

This implementation of the DELETE action resets the default encryption for the bucket as\n server-side encryption with Amazon S3 managed keys (SSE-S3). For information about the bucket\n default encryption feature, see Amazon S3 Bucket Default Encryption\n in the Amazon S3 User Guide.

\n

To use this operation, you must have permissions to perform the\n s3:PutEncryptionConfiguration action. The bucket owner has this permission\n by default. The bucket owner can grant this permission to others. For more information\n about permissions, see Permissions Related to Bucket Subresource Operations and Managing\n Access Permissions to your Amazon S3 Resources in the\n Amazon S3 User Guide.

\n

The following operations are related to DeleteBucketEncryption:

\n ", + "smithy.api#documentation": "\n

This operation is not supported by directory buckets.

\n
\n

This implementation of the DELETE action resets the default encryption for the bucket as\n server-side encryption with Amazon S3 managed keys (SSE-S3). For information about the bucket\n default encryption feature, see Amazon S3 Bucket Default Encryption\n in the Amazon S3 User Guide.

\n

To use this operation, you must have permissions to perform the\n s3:PutEncryptionConfiguration action. The bucket owner has this permission\n by default. The bucket owner can grant this permission to others. For more information\n about permissions, see Permissions Related to Bucket Subresource Operations and Managing\n Access Permissions to your Amazon S3 Resources in the\n Amazon S3 User Guide.

\n

The following operations are related to DeleteBucketEncryption:

\n ", "smithy.api#http": { "method": "DELETE", "uri": "/{Bucket}?encryption", "code": 204 + }, + "smithy.rules#staticContextParams": { + "UseS3ExpressControlEndpoint": { + "value": true + } } } }, @@ -18029,7 +19908,7 @@ "ExpectedBucketOwner": { "target": "com.amazonaws.s3#AccountId", "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

", + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } } @@ -18047,11 +19926,16 @@ "target": "smithy.api#Unit" }, "traits": { - "smithy.api#documentation": "

Deletes the S3 Intelligent-Tiering configuration from the specified bucket.

\n

The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without performance impact or operational overhead. S3 Intelligent-Tiering delivers automatic cost savings in three low latency and high throughput access tiers. To get the lowest storage cost on data that can be accessed in minutes to hours, you can choose to activate additional archiving capabilities.

\n

The S3 Intelligent-Tiering storage class is the ideal storage class for data with unknown, changing, or unpredictable access patterns, independent of object size or retention period. If the size of an object is less than 128 KB, it is not monitored and not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the Frequent Access tier rates in the S3 Intelligent-Tiering storage class.

\n

For more information, see Storage class for automatically optimizing frequently and infrequently accessed objects.

\n

Operations related to DeleteBucketIntelligentTieringConfiguration include:

\n ", + "smithy.api#documentation": "\n

This operation is not supported by directory buckets.

\n
\n

Deletes the S3 Intelligent-Tiering configuration from the specified bucket.

\n

The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without performance impact or operational overhead. S3 Intelligent-Tiering delivers automatic cost savings in three low latency and high throughput access tiers. To get the lowest storage cost on data that can be accessed in minutes to hours, you can choose to activate additional archiving capabilities.

\n

The S3 Intelligent-Tiering storage class is the ideal storage class for data with unknown, changing, or unpredictable access patterns, independent of object size or retention period. If the size of an object is less than 128 KB, it is not monitored and not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the Frequent Access tier rates in the S3 Intelligent-Tiering storage class.

\n

For more information, see Storage class for automatically optimizing frequently and infrequently accessed objects.

\n

Operations related to DeleteBucketIntelligentTieringConfiguration include:

\n ", "smithy.api#http": { "method": "DELETE", "uri": "/{Bucket}?intelligent-tiering", "code": 204 + }, + "smithy.rules#staticContextParams": { + "UseS3ExpressControlEndpoint": { + "value": true + } } } }, @@ -18091,11 +19975,16 @@ "target": "smithy.api#Unit" }, "traits": { - "smithy.api#documentation": "

Deletes an inventory configuration (identified by the inventory ID) from the\n bucket.

\n

To use this operation, you must have permissions to perform the\n s3:PutInventoryConfiguration action. The bucket owner has this permission\n by default. The bucket owner can grant this permission to others. For more information\n about permissions, see Permissions Related to Bucket Subresource Operations and Managing\n Access Permissions to Your Amazon S3 Resources.

\n

For information about the Amazon S3 inventory feature, see Amazon S3 Inventory.

\n

Operations related to DeleteBucketInventoryConfiguration include:

\n ", + "smithy.api#documentation": "\n

This operation is not supported by directory buckets.

\n
\n

Deletes an inventory configuration (identified by the inventory ID) from the\n bucket.

\n

To use this operation, you must have permissions to perform the\n s3:PutInventoryConfiguration action. The bucket owner has this permission\n by default. The bucket owner can grant this permission to others. For more information\n about permissions, see Permissions Related to Bucket Subresource Operations and Managing\n Access Permissions to Your Amazon S3 Resources.

\n

For information about the Amazon S3 inventory feature, see Amazon S3 Inventory.

\n

Operations related to DeleteBucketInventoryConfiguration include:

\n ", "smithy.api#http": { "method": "DELETE", "uri": "/{Bucket}?inventory", "code": 204 + }, + "smithy.rules#staticContextParams": { + "UseS3ExpressControlEndpoint": { + "value": true + } } } }, @@ -18124,7 +20013,7 @@ "ExpectedBucketOwner": { "target": "com.amazonaws.s3#AccountId", "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

", + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } } @@ -18142,7 +20031,7 @@ "target": "smithy.api#Unit" }, "traits": { - "smithy.api#documentation": "

Deletes the lifecycle configuration from the specified bucket. Amazon S3 removes all the\n lifecycle configuration rules in the lifecycle subresource associated with the bucket. Your\n objects never expire, and Amazon S3 no longer automatically deletes any objects on the basis of\n rules contained in the deleted lifecycle configuration.

\n

To use this operation, you must have permission to perform the\n s3:PutLifecycleConfiguration action. By default, the bucket owner has this\n permission and the bucket owner can grant this permission to others.

\n

There is usually some time lag before lifecycle configuration deletion is fully\n propagated to all the Amazon S3 systems.

\n

For more information about the object expiration, see Elements to Describe Lifecycle Actions.

\n

Related actions include:

\n ", + "smithy.api#documentation": "\n

This operation is not supported by directory buckets.

\n
\n

Deletes the lifecycle configuration from the specified bucket. Amazon S3 removes all the\n lifecycle configuration rules in the lifecycle subresource associated with the bucket. Your\n objects never expire, and Amazon S3 no longer automatically deletes any objects on the basis of\n rules contained in the deleted lifecycle configuration.

\n

To use this operation, you must have permission to perform the\n s3:PutLifecycleConfiguration action. By default, the bucket owner has this\n permission and the bucket owner can grant this permission to others.

\n

There is usually some time lag before lifecycle configuration deletion is fully\n propagated to all the Amazon S3 systems.

\n

For more information about object expiration, see Elements to Describe Lifecycle Actions.

\n

Related actions include:

\n ", "smithy.api#examples": [ { "title": "To delete lifecycle configuration on a bucket.", @@ -18156,6 +20045,11 @@ "method": "DELETE", "uri": "/{Bucket}?lifecycle", "code": 204 + }, + "smithy.rules#staticContextParams": { + "UseS3ExpressControlEndpoint": { + "value": true + } } } }, @@ -18176,7 +20070,7 @@ "ExpectedBucketOwner": { "target": "com.amazonaws.s3#AccountId", "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

", + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } } @@ -18194,11 +20088,16 @@ "target": "smithy.api#Unit" }, "traits": { - "smithy.api#documentation": "

Deletes a metrics configuration for the Amazon CloudWatch request metrics (specified by the\n metrics configuration ID) from the bucket. Note that this doesn't include the daily storage\n metrics.

\n

To use this operation, you must have permissions to perform the\n s3:PutMetricsConfiguration action. The bucket owner has this permission by\n default. The bucket owner can grant this permission to others. For more information about\n permissions, see Permissions Related to Bucket Subresource Operations and Managing\n Access Permissions to Your Amazon S3 Resources.

\n

For information about CloudWatch request metrics for Amazon S3, see Monitoring Metrics with\n Amazon CloudWatch.

\n

The following operations are related to\n DeleteBucketMetricsConfiguration:

\n ", + "smithy.api#documentation": "\n

This operation is not supported by directory buckets.

\n
\n

Deletes a metrics configuration for the Amazon CloudWatch request metrics (specified by the\n metrics configuration ID) from the bucket. Note that this doesn't include the daily storage\n metrics.

\n

To use this operation, you must have permissions to perform the\n s3:PutMetricsConfiguration action. The bucket owner has this permission by\n default. The bucket owner can grant this permission to others. For more information about\n permissions, see Permissions Related to Bucket Subresource Operations and Managing\n Access Permissions to Your Amazon S3 Resources.

\n

For information about CloudWatch request metrics for Amazon S3, see Monitoring Metrics with\n Amazon CloudWatch.

\n

The following operations are related to\n DeleteBucketMetricsConfiguration:

\n ", "smithy.api#http": { "method": "DELETE", "uri": "/{Bucket}?metrics", "code": 204 + }, + "smithy.rules#staticContextParams": { + "UseS3ExpressControlEndpoint": { + "value": true + } } } }, @@ -18227,7 +20126,7 @@ "ExpectedBucketOwner": { "target": "com.amazonaws.s3#AccountId", "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

", + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } } @@ -18245,11 +20144,16 @@ "target": "smithy.api#Unit" }, "traits": { - "smithy.api#documentation": "

Removes OwnershipControls for an Amazon S3 bucket. To use this operation, you\n must have the s3:PutBucketOwnershipControls permission. For more information\n about Amazon S3 permissions, see Specifying Permissions in a\n Policy.

\n

For information about Amazon S3 Object Ownership, see Using Object Ownership.

\n

The following operations are related to\n DeleteBucketOwnershipControls:

\n ", + "smithy.api#documentation": "\n

This operation is not supported by directory buckets.

\n
\n

Removes OwnershipControls for an Amazon S3 bucket. To use this operation, you\n must have the s3:PutBucketOwnershipControls permission. For more information\n about Amazon S3 permissions, see Specifying Permissions in a\n Policy.

\n

For information about Amazon S3 Object Ownership, see Using Object Ownership.

\n

The following operations are related to\n DeleteBucketOwnershipControls:

\n ", "smithy.api#http": { "method": "DELETE", "uri": "/{Bucket}?ownershipControls", "code": 204 + }, + "smithy.rules#staticContextParams": { + "UseS3ExpressControlEndpoint": { + "value": true + } } } }, @@ -18270,7 +20174,7 @@ "ExpectedBucketOwner": { "target": "com.amazonaws.s3#AccountId", "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

", + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } } @@ -18288,7 +20192,7 @@ "target": "smithy.api#Unit" }, "traits": { - "smithy.api#documentation": "

This implementation of the DELETE action uses the policy subresource to delete the\n policy of a specified bucket. If you are using an identity other than the root user of the\n Amazon Web Services account that owns the bucket, the calling identity must have the\n DeleteBucketPolicy permissions on the specified bucket and belong to the\n bucket owner's account to use this operation.

\n

If you don't have DeleteBucketPolicy permissions, Amazon S3 returns a 403\n Access Denied error. If you have the correct permissions, but you're not using an\n identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not\n Allowed error.

\n \n

To ensure that bucket owners don't inadvertently lock themselves out of their own\n buckets, the root principal in a bucket owner's Amazon Web Services account can perform the\n GetBucketPolicy, PutBucketPolicy, and\n DeleteBucketPolicy API actions, even if their bucket policy explicitly\n denies the root principal's access. Bucket owner root principals can only be blocked\n from performing these API actions by VPC endpoint policies and Amazon Web Services Organizations\n policies.

\n
\n

For more information about bucket policies, see Using Bucket Policies and\n UserPolicies.

\n

The following operations are related to DeleteBucketPolicy\n

\n ", + "smithy.api#documentation": "

Deletes the\n policy of a specified bucket.

\n \n

\n Directory buckets - For directory buckets, you must make requests for this API operation to the Regional endpoint. These endpoints support path-style requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name\n . Virtual-hosted-style requests aren't supported. \nFor more information, see Regional and Zonal endpoints in the\n Amazon S3 User Guide.

\n
\n
\n
Permissions
\n
\n

If you are using an identity other than the\n root user of the Amazon Web Services account that owns the bucket, the calling identity must both have the\n DeleteBucketPolicy permissions on the specified bucket and belong to the\n bucket owner's account in order to use this operation.

\n

If you don't have DeleteBucketPolicy permissions, Amazon S3 returns a 403\n Access Denied error. If you have the correct permissions, but you're not using an\n identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not\n Allowed error.

\n \n

To ensure that bucket owners don't inadvertently lock themselves out of their own\n buckets, the root principal in a bucket owner's Amazon Web Services account can perform the\n GetBucketPolicy, PutBucketPolicy, and\n DeleteBucketPolicy API actions, even if their bucket policy explicitly\n denies the root principal's access. Bucket owner root principals can only be blocked\n from performing these API actions by VPC endpoint policies and Amazon Web Services Organizations\n policies.

\n
\n
    \n
  • \n

    \n General purpose bucket permissions - The s3:DeleteBucketPolicy permission is required in a policy. \n For more information about general purpose bucket policies, see Using Bucket Policies and User\n Policies in the Amazon S3 User Guide.

    \n
  • \n
  • \n

    \n Directory bucket permissions - To grant access to this API operation, you must have the s3express:DeleteBucketPolicy permission in an IAM identity-based policy instead of a bucket policy. Cross-account access to this API operation isn't supported. This operation can only be performed by the Amazon Web Services account that owns the resource. For more information about directory bucket policies and permissions, see Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone in the Amazon S3 User Guide.

    \n
  • \n
\n
\n
HTTP Host header syntax
\n
\n

\n Directory buckets - The HTTP Host header syntax is s3express-control.region.amazonaws.com.

\n
\n
\n

The following operations are related to DeleteBucketPolicy\n

\n ", "smithy.api#examples": [ { "title": "To delete bucket policy", @@ -18302,6 +20206,11 @@ "method": "DELETE", "uri": "/{Bucket}?policy", "code": 204 + }, + "smithy.rules#staticContextParams": { + "UseS3ExpressControlEndpoint": { + "value": true + } } } }, @@ -18311,7 +20220,7 @@ "Bucket": { "target": "com.amazonaws.s3#BucketName", "traits": { - "smithy.api#documentation": "

The bucket name.

", + "smithy.api#documentation": "

The bucket name.

\n

\n Directory buckets - When you use this operation with a directory bucket, you must use path-style requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name\n . Virtual-hosted-style requests aren't supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must also follow the format \n bucket_base_name--az_id--x-s3 (for example, \n DOC-EXAMPLE-BUCKET--usw2-az2--x-s3). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide\n

", "smithy.api#httpLabel": {}, "smithy.api#required": {}, "smithy.rules#contextParam": { @@ -18322,7 +20231,7 @@ "ExpectedBucketOwner": { "target": "com.amazonaws.s3#AccountId", "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

", + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

\n \n

For directory buckets, this header is not supported in this API operation. If you specify this header, the request fails with the HTTP status code \n501 Not Implemented.

\n
", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } } @@ -18340,7 +20249,7 @@ "target": "smithy.api#Unit" }, "traits": { - "smithy.api#documentation": "

Deletes the replication configuration from the bucket.

\n

To use this operation, you must have permissions to perform the\n s3:PutReplicationConfiguration action. The bucket owner has these\n permissions by default and can grant it to others. For more information about permissions,\n see Permissions Related to Bucket Subresource Operations and Managing\n Access Permissions to Your Amazon S3 Resources.

\n \n

It can take a while for the deletion of a replication configuration to fully\n propagate.

\n
\n

For information about replication configuration, see Replication in the\n Amazon S3 User Guide.

\n

The following operations are related to DeleteBucketReplication:

\n ", + "smithy.api#documentation": "\n

This operation is not supported by directory buckets.

\n
\n

Deletes the replication configuration from the bucket.

\n

To use this operation, you must have permissions to perform the\n s3:PutReplicationConfiguration action. The bucket owner has these\n permissions by default and can grant it to others. For more information about permissions,\n see Permissions Related to Bucket Subresource Operations and Managing\n Access Permissions to Your Amazon S3 Resources.

\n \n

It can take a while for the deletion of a replication configuration to fully\n propagate.

\n
\n

For information about replication configuration, see Replication in the\n Amazon S3 User Guide.

\n

The following operations are related to DeleteBucketReplication:

\n ", "smithy.api#examples": [ { "title": "To delete bucket replication configuration", @@ -18354,6 +20263,11 @@ "method": "DELETE", "uri": "/{Bucket}?replication", "code": 204 + }, + "smithy.rules#staticContextParams": { + "UseS3ExpressControlEndpoint": { + "value": true + } } } }, @@ -18374,7 +20288,7 @@ "ExpectedBucketOwner": { "target": "com.amazonaws.s3#AccountId", "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

", + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } } @@ -18389,7 +20303,7 @@ "Bucket": { "target": "com.amazonaws.s3#BucketName", "traits": { - "smithy.api#documentation": "

Specifies the bucket being deleted.

", + "smithy.api#documentation": "

Specifies the bucket being deleted.

\n

\n Directory buckets - When you use this operation with a directory bucket, you must use path-style requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name\n . Virtual-hosted-style requests aren't supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must also follow the format \n bucket_base_name--az_id--x-s3 (for example, \n DOC-EXAMPLE-BUCKET--usw2-az2--x-s3). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide\n

", "smithy.api#httpLabel": {}, "smithy.api#required": {}, "smithy.rules#contextParam": { @@ -18400,7 +20314,7 @@ "ExpectedBucketOwner": { "target": "com.amazonaws.s3#AccountId", "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

", + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

\n \n

For directory buckets, this header is not supported in this API operation. If you specify this header, the request fails with the HTTP status code \n501 Not Implemented.

\n
", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } } @@ -18418,7 +20332,7 @@ "target": "smithy.api#Unit" }, "traits": { - "smithy.api#documentation": "

Deletes the tags from the bucket.

\n

To use this operation, you must have permission to perform the\n s3:PutBucketTagging action. By default, the bucket owner has this\n permission and can grant this permission to others.

\n

The following operations are related to DeleteBucketTagging:

\n ", + "smithy.api#documentation": "\n

This operation is not supported by directory buckets.

\n
\n

Deletes the tags from the bucket.

\n

To use this operation, you must have permission to perform the\n s3:PutBucketTagging action. By default, the bucket owner has this\n permission and can grant this permission to others.

\n

The following operations are related to DeleteBucketTagging:

\n ", "smithy.api#examples": [ { "title": "To delete bucket tags", @@ -18432,6 +20346,11 @@ "method": "DELETE", "uri": "/{Bucket}?tagging", "code": 204 + }, + "smithy.rules#staticContextParams": { + "UseS3ExpressControlEndpoint": { + "value": true + } } } }, @@ -18452,7 +20371,7 @@ "ExpectedBucketOwner": { "target": "com.amazonaws.s3#AccountId", "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

", + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } } @@ -18470,7 +20389,7 @@ "target": "smithy.api#Unit" }, "traits": { - "smithy.api#documentation": "

This action removes the website configuration for a bucket. Amazon S3 returns a 200\n OK response upon successfully deleting a website configuration on the specified\n bucket. You will get a 200 OK response if the website configuration you are\n trying to delete does not exist on the bucket. Amazon S3 returns a 404 response if\n the bucket specified in the request does not exist.

\n

This DELETE action requires the S3:DeleteBucketWebsite permission. By\n default, only the bucket owner can delete the website configuration attached to a bucket.\n However, bucket owners can grant other users permission to delete the website configuration\n by writing a bucket policy granting them the S3:DeleteBucketWebsite\n permission.

\n

For more information about hosting websites, see Hosting Websites on Amazon S3.

\n

The following operations are related to DeleteBucketWebsite:

\n ", + "smithy.api#documentation": "\n

This operation is not supported by directory buckets.

\n
\n

This action removes the website configuration for a bucket. Amazon S3 returns a 200\n OK response upon successfully deleting a website configuration on the specified\n bucket. You will get a 200 OK response if the website configuration you are\n trying to delete does not exist on the bucket. Amazon S3 returns a 404 response if\n the bucket specified in the request does not exist.

\n

This DELETE action requires the S3:DeleteBucketWebsite permission. By\n default, only the bucket owner can delete the website configuration attached to a bucket.\n However, bucket owners can grant other users permission to delete the website configuration\n by writing a bucket policy granting them the S3:DeleteBucketWebsite\n permission.

\n

For more information about hosting websites, see Hosting Websites on Amazon S3.

\n

The following operations are related to DeleteBucketWebsite:

\n ", "smithy.api#examples": [ { "title": "To delete bucket website configuration", @@ -18484,6 +20403,11 @@ "method": "DELETE", "uri": "/{Bucket}?website", "code": 204 + }, + "smithy.rules#staticContextParams": { + "UseS3ExpressControlEndpoint": { + "value": true + } } } }, @@ -18504,7 +20428,7 @@ "ExpectedBucketOwner": { "target": "com.amazonaws.s3#AccountId", "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

", + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } } @@ -18603,15 +20527,16 @@ "target": "com.amazonaws.s3#DeleteObjectOutput" }, "traits": { - "smithy.api#documentation": "

Removes the null version (if there is one) of an object and inserts a delete marker,\n which becomes the latest version of the object. If there isn't a null version, Amazon S3 does\n not remove any objects but will still respond that the command was successful.

\n

To remove a specific version, you must use the version Id subresource. Using this\n subresource permanently deletes the version. If the object deleted is a delete marker, Amazon S3\n sets the response header, x-amz-delete-marker, to true.

\n

If the object you want to delete is in a bucket where the bucket versioning\n configuration is MFA Delete enabled, you must include the x-amz-mfa request\n header in the DELETE versionId request. Requests that include\n x-amz-mfa must use HTTPS.

\n

For more information about MFA Delete, see Using MFA Delete. To see sample\n requests that use versioning, see Sample\n Request.

\n

You can delete objects by explicitly calling DELETE Object or configure its lifecycle\n (PutBucketLifecycle) to enable Amazon S3 to remove them for you. If you want to block\n users or accounts from removing or deleting objects from your bucket, you must deny them\n the s3:DeleteObject, s3:DeleteObjectVersion, and\n s3:PutLifeCycleConfiguration actions.

\n

The following action is related to DeleteObject:

\n ", + "smithy.api#documentation": "

Removes an object from a bucket. The behavior depends on the bucket's versioning state:

\n
    \n
  • \n

    If versioning is enabled, the operation removes the null version (if there is one) of an object and inserts a delete marker,\n which becomes the latest version of the object. If there isn't a null version, Amazon S3 does\n not remove any objects but will still respond that the command was successful.

    \n
  • \n
  • \n

    If versioning is suspended or not enabled, the operation permanently deletes the object.

    \n
  • \n
\n \n
    \n
  • \n

    \n Directory buckets - S3 Versioning isn't enabled and supported for directory buckets. For this API operation, only the null value of the version ID is supported by directory buckets. You can only specify null \n to the versionId query parameter in the request.

    \n
  • \n
  • \n

    \n Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name\n . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the\n Amazon S3 User Guide.

    \n
  • \n
\n
\n

To remove a specific version, you must use the versionId query parameter. Using this\n query parameter permanently deletes the version. If the object deleted is a delete marker, Amazon S3\n sets the response header x-amz-delete-marker to true.

\n

If the object you want to delete is in a bucket where the bucket versioning\n configuration is MFA Delete enabled, you must include the x-amz-mfa request\n header in the DELETE versionId request. Requests that include\n x-amz-mfa must use HTTPS. For more information about MFA Delete, see Using MFA Delete in the Amazon S3\n User Guide. To see sample\n requests that use versioning, see Sample\n Request.

\n \n

\n Directory buckets - MFA delete is not supported by directory buckets.

\n
\n

You can delete objects by explicitly calling DELETE Object or by configuring its lifecycle \n (PutBucketLifecycle) to enable Amazon S3 to remove them for you. If you want to block\n users or accounts from removing or deleting objects from your bucket, you must deny them\n the s3:DeleteObject, s3:DeleteObjectVersion, and\n s3:PutLifeCycleConfiguration actions.

\n \n

\n Directory buckets - S3 Lifecycle is not supported by directory buckets.

\n
\n
\n
Permissions
\n
\n
    \n
  • \n

    \n General purpose bucket permissions - The following permissions are required in your policies when your \n DeleteObject request includes specific headers.

    \n
      \n
    • \n

      \n \n s3:DeleteObject\n - To delete an object from a bucket, you must always have the s3:DeleteObject permission.

      \n
    • \n
    • \n

      \n \n s3:DeleteObjectVersion\n - To delete a specific version of an object from a versioning-enabled bucket, you must have the s3:DeleteObjectVersion permission.

      \n
    • \n
    \n
  • \n
  • \n

    \n Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the \n CreateSession\n API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. \nThe Amazon Web Services CLI or SDKs create a session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see \n CreateSession\n .

    \n
  • \n
\n
\n
HTTP Host header syntax
\n
\n

\n Directory buckets - The HTTP Host header syntax is \n Bucket_name.s3express-az_id.region.amazonaws.com.

\n
\n
\n

The following action is related to DeleteObject:

\n ", "smithy.api#examples": [ { - "title": "To delete an object (from a non-versioned bucket)", - "documentation": "The following example deletes an object from a non-versioned bucket.", + "title": "To delete an object", + "documentation": "The following example deletes an object from an S3 bucket.", "input": { - "Bucket": "ExampleBucket", - "Key": "HappyFace.jpg" - } + "Bucket": "examplebucket", + "Key": "objectkey.jpg" + }, + "output": {} } ], "smithy.api#http": { @@ -18627,14 +20552,14 @@ "DeleteMarker": { "target": "com.amazonaws.s3#DeleteMarker", "traits": { - "smithy.api#documentation": "

Indicates whether the specified object version that was permanently deleted was (true) or was\n not (false) a delete marker before deletion. In a simple DELETE, this header indicates whether (true) or\n not (false) the current version of the object is a delete marker.

", + "smithy.api#documentation": "

Indicates whether the specified object version that was permanently deleted was (true) or was\n not (false) a delete marker before deletion. In a simple DELETE, this header indicates whether (true) or\n not (false) the current version of the object is a delete marker.

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-delete-marker" } }, "VersionId": { "target": "com.amazonaws.s3#ObjectVersionId", "traits": { - "smithy.api#documentation": "

Returns the version ID of the delete marker created as a result of the DELETE\n operation.

", + "smithy.api#documentation": "

Returns the version ID of the delete marker created as a result of the DELETE\n operation.

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-version-id" } }, @@ -18655,7 +20580,7 @@ "Bucket": { "target": "com.amazonaws.s3#BucketName", "traits": { - "smithy.api#documentation": "

The bucket name of the bucket containing the object.

\n

When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.

\n

When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form \n AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide.

", + "smithy.api#documentation": "

The bucket name of the bucket containing the object.

\n

\n Directory buckets - When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format \n Bucket_name.s3express-az_id.region.amazonaws.com. Path-style requests are not supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must follow the format \n bucket_base_name--az-id--x-s3 (for example, \n DOC-EXAMPLE-BUCKET--usw2-az2--x-s3). For information about bucket naming\n restrictions, see Directory bucket naming\n rules in the Amazon S3 User Guide.

\n

\n Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.

\n \n

Access points and Object Lambda access points are not supported by directory buckets.

\n
\n

\n S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form \n AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide.

", "smithy.api#httpLabel": {}, "smithy.api#required": {}, "smithy.rules#contextParam": { @@ -18677,14 +20602,14 @@ "MFA": { "target": "com.amazonaws.s3#MFA", "traits": { - "smithy.api#documentation": "

The concatenation of the authentication device's serial number, a space, and the value\n that is displayed on your authentication device. Required to permanently delete a versioned\n object if versioning is configured with MFA delete enabled.

", + "smithy.api#documentation": "

The concatenation of the authentication device's serial number, a space, and the value\n that is displayed on your authentication device. Required to permanently delete a versioned\n object if versioning is configured with MFA delete enabled.

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-mfa" } }, "VersionId": { "target": "com.amazonaws.s3#ObjectVersionId", "traits": { - "smithy.api#documentation": "

VersionId used to reference a specific version of the object.

", + "smithy.api#documentation": "

Version ID used to reference a specific version of the object.

\n \n

For directory buckets in this API operation, only the null value of the version ID is supported.

\n
", "smithy.api#httpQuery": "versionId" } }, @@ -18697,14 +20622,14 @@ "BypassGovernanceRetention": { "target": "com.amazonaws.s3#BypassGovernanceRetention", "traits": { - "smithy.api#documentation": "

Indicates whether S3 Object Lock should bypass Governance-mode restrictions to process\n this operation. To use this header, you must have the\n s3:BypassGovernanceRetention permission.

", + "smithy.api#documentation": "

Indicates whether S3 Object Lock should bypass Governance-mode restrictions to process\n this operation. To use this header, you must have the\n s3:BypassGovernanceRetention permission.

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-bypass-governance-retention" } }, "ExpectedBucketOwner": { "target": "com.amazonaws.s3#AccountId", "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

", + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } } @@ -18722,7 +20647,7 @@ "target": "com.amazonaws.s3#DeleteObjectTaggingOutput" }, "traits": { - "smithy.api#documentation": "

Removes the entire tag set from the specified object. For more information about\n managing object tags, see Object Tagging.

\n

To use this operation, you must have permission to perform the\n s3:DeleteObjectTagging action.

\n

To delete tags of a specific object version, add the versionId query\n parameter in the request. You will need permission for the\n s3:DeleteObjectVersionTagging action.

\n

The following operations are related to DeleteObjectTagging:

\n ", + "smithy.api#documentation": "\n

This operation is not supported by directory buckets.

\n
\n

Removes the entire tag set from the specified object. For more information about\n managing object tags, see Object Tagging.

\n

To use this operation, you must have permission to perform the\n s3:DeleteObjectTagging action.

\n

To delete tags of a specific object version, add the versionId query\n parameter in the request. You will need permission for the\n s3:DeleteObjectVersionTagging action.

\n

The following operations are related to DeleteObjectTagging:

\n ", "smithy.api#examples": [ { "title": "To remove tag set from an object", @@ -18764,7 +20689,7 @@ "Bucket": { "target": "com.amazonaws.s3#BucketName", "traits": { - "smithy.api#documentation": "

The bucket name containing the objects from which to remove the tags.

\n

When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.

\n

When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form \n AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide.

", + "smithy.api#documentation": "

The bucket name containing the objects from which to remove the tags.

\n

\n Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.

\n

\n S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form \n AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide.

", "smithy.api#httpLabel": {}, "smithy.api#required": {}, "smithy.rules#contextParam": { @@ -18790,7 +20715,7 @@ "ExpectedBucketOwner": { "target": "com.amazonaws.s3#AccountId", "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

", + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } } @@ -18812,41 +20737,7 @@ "requestAlgorithmMember": "ChecksumAlgorithm", "requestChecksumRequired": true }, - "smithy.api#documentation": "

This action enables you to delete multiple objects from a bucket using a single HTTP\n request. If you know the object keys that you want to delete, then this action provides a\n suitable alternative to sending individual delete requests, reducing per-request\n overhead.

\n

The request contains a list of up to 1000 keys that you want to delete. In the XML, you\n provide the object key names, and optionally, version IDs if you want to delete a specific\n version of the object from a versioning-enabled bucket. For each key, Amazon S3 performs a\n delete action and returns the result of that delete, success, or failure, in the response.\n Note that if the object specified in the request is not found, Amazon S3 returns the result as\n deleted.

\n

The action supports two modes for the response: verbose and quiet. By default, the\n action uses verbose mode in which the response includes the result of deletion of each key\n in your request. In quiet mode the response includes only keys where the delete action\n encountered an error. For a successful deletion, the action does not return any information\n about the delete in the response body.

\n

When performing this action on an MFA Delete enabled bucket, that attempts to delete any\n versioned objects, you must include an MFA token. If you do not provide one, the entire\n request will fail, even if there are non-versioned objects you are trying to delete. If you\n provide an invalid token, whether there are versioned keys in the request or not, the\n entire Multi-Object Delete request will fail. For information about MFA Delete, see MFA\n Delete.

\n

Finally, the Content-MD5 header is required for all Multi-Object Delete requests. Amazon S3\n uses the header value to ensure that your request body has not been altered in\n transit.

\n

The following operations are related to DeleteObjects:

\n ", - "smithy.api#examples": [ - { - "title": "To delete multiple object versions from a versioned bucket", - "documentation": "The following example deletes objects from a bucket. The request specifies object versions. S3 deletes specific object versions and returns the key and versions of deleted objects in the response.", - "input": { - "Bucket": "examplebucket", - "Delete": { - "Objects": [ - { - "Key": "HappyFace.jpg", - "VersionId": "2LWg7lQLnY41.maGB5Z6SWW.dcq0vx7b" - }, - { - "Key": "HappyFace.jpg", - "VersionId": "yoz3HB.ZhCS_tKVEmIOr7qYyyAaZSKVd" - } - ], - "Quiet": false - } - }, - "output": { - "Deleted": [ - { - "VersionId": "yoz3HB.ZhCS_tKVEmIOr7qYyyAaZSKVd", - "Key": "HappyFace.jpg" - }, - { - "VersionId": "2LWg7lQLnY41.maGB5Z6SWW.dcq0vx7b", - "Key": "HappyFace.jpg" - } - ] - } - } - ], + "smithy.api#documentation": "

This operation enables you to delete multiple objects from a bucket using a single HTTP\n request. If you know the object keys that you want to delete, then this operation provides a\n suitable alternative to sending individual delete requests, reducing per-request\n overhead.

\n

The request can contain a list of up to 1000 keys that you want to delete. In the XML, you\n provide the object key names, and optionally, version IDs if you want to delete a specific\n version of the object from a versioning-enabled bucket. For each key, Amazon S3 performs a\n delete operation and returns the result of that delete, success or failure, in the response.\n Note that if the object specified in the request is not found, Amazon S3 returns the result as\n deleted.

\n \n
    \n
  • \n

    \n Directory buckets - S3 Versioning isn't enabled or supported for directory buckets.

    \n
  • \n
  • \n

    \n Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name\n . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the\n Amazon S3 User Guide.

    \n
  • \n
\n
\n

The operation supports two modes for the response: verbose and quiet. By default, the\n operation uses verbose mode in which the response includes the result of deletion of each key\n in your request. In quiet mode the response includes only keys where the delete operation \n encountered an error. For a successful deletion in quiet mode, the operation does not return any information\n about the delete in the response body.

\n

When performing this action on an MFA Delete enabled bucket, that attempts to delete any\n versioned objects, you must include an MFA token. If you do not provide one, the entire\n request will fail, even if there are non-versioned objects you are trying to delete. If you\n provide an invalid token, whether there are versioned keys in the request or not, the\n entire Multi-Object Delete request will fail. For information about MFA Delete, see MFA\n Delete in the Amazon S3\n User Guide.

\n \n

\n Directory buckets - MFA delete is not supported by directory buckets.

\n
\n
\n
Permissions
\n
\n
    \n
  • \n

    \n General purpose bucket permissions - The following permissions are required in your policies when your \n DeleteObjects request includes specific headers.

    \n
      \n
    • \n

      \n \n s3:DeleteObject\n - To delete an object from a bucket, you must always specify the s3:DeleteObject permission.

      \n
    • \n
    • \n

\n \n s3:DeleteObjectVersion\n - To delete a specific version of an object from a versioning-enabled bucket, you must specify the s3:DeleteObjectVersion permission.

      \n
    • \n
    \n
  • \n
  • \n

\n Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the \n CreateSession\n API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. \nThe Amazon Web Services CLI or SDKs create a session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see \n CreateSession\n .

    \n
  • \n
\n
\n
Content-MD5 request header
\n
\n
    \n
  • \n

    \n General purpose bucket - The Content-MD5 request header is required for all Multi-Object Delete requests. Amazon S3\n uses the header value to ensure that your request body has not been altered in\n transit.

    \n
  • \n
  • \n

\n Directory bucket - The Content-MD5 request header or an additional checksum request header \n (including x-amz-checksum-crc32, x-amz-checksum-crc32c, x-amz-checksum-sha1, or \n x-amz-checksum-sha256) is required for all Multi-Object Delete requests.

    \n
  • \n
\n
\n
HTTP Host header syntax
\n
\n

\n Directory buckets - The HTTP Host header syntax is \n Bucket_name.s3express-az_id.region.amazonaws.com.

\n
\n
\n

The following operations are related to DeleteObjects:

\n ", "smithy.api#http": { "method": "POST", "uri": "/{Bucket}?delete&x-id=DeleteObjects", @@ -18890,7 +20781,7 @@ "Bucket": { "target": "com.amazonaws.s3#BucketName", "traits": { - "smithy.api#documentation": "

The bucket name containing the objects to delete.

\n

When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.

\n

When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form \n AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide.

", + "smithy.api#documentation": "

The bucket name containing the objects to delete.

\n

\n Directory buckets - When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format \n Bucket_name.s3express-az_id.region.amazonaws.com. Path-style requests are not supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must follow the format \n bucket_base_name--az-id--x-s3 (for example, \n DOC-EXAMPLE-BUCKET--usw2-az2--x-s3). For information about bucket naming\n restrictions, see Directory bucket naming\n rules in the Amazon S3 User Guide.

\n

\n Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.

\n \n

Access points and Object Lambda access points are not supported by directory buckets.

\n
\n

\n S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form \n AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide.
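As a sketch of the alternative Bucket values described above, the same input accepts an access point ARN (or alias) or an S3 on Outposts access point ARN in place of a bucket name; the ARN, account ID, and key below are placeholders, and the setup is the same as in the earlier DeleteObjects sketch.

```go
// deleteViaAccessPoint passes an access point ARN as the Bucket parameter.
func deleteViaAccessPoint(ctx context.Context, client *s3.Client) error {
	_, err := client.DeleteObjects(ctx, &s3.DeleteObjectsInput{
		// Placeholder access point ARN; an Outposts access point ARN works the same way.
		Bucket: aws.String("arn:aws:s3:us-west-2:111122223333:accesspoint/my-access-point"),
		Delete: &types.Delete{
			Objects: []types.ObjectIdentifier{{Key: aws.String("HappyFace.jpg")}},
		},
	})
	return err
}
```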

", "smithy.api#httpLabel": {}, "smithy.api#required": {}, "smithy.rules#contextParam": { @@ -18910,7 +20801,7 @@ "MFA": { "target": "com.amazonaws.s3#MFA", "traits": { - "smithy.api#documentation": "

The concatenation of the authentication device's serial number, a space, and the value\n that is displayed on your authentication device. Required to permanently delete a versioned\n object if versioning is configured with MFA delete enabled.

", + "smithy.api#documentation": "

The concatenation of the authentication device's serial number, a space, and the value\n that is displayed on your authentication device. Required to permanently delete a versioned\n object if versioning is configured with MFA delete enabled.

\n

When performing the DeleteObjects operation on an MFA delete enabled bucket, which attempts to delete the specified \n versioned objects, you must include an MFA token. If you don't provide an MFA token, the entire\n request will fail, even if there are non-versioned objects that you are trying to delete. If you\n provide an invalid token, whether there are versioned object keys in the request or not, the\n entire Multi-Object Delete request will fail. For information about MFA Delete, see MFA\n Delete in the Amazon S3 User Guide.
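A minimal sketch of the x-amz-mfa header format described above, assuming the imports and client from the earlier DeleteObjects sketch; the device serial (shown here as a virtual MFA ARN), token code, bucket, key, and version ID are all placeholders.

```go
// mfaDelete sets the MFA field as "<device serial> <token code>", separated by a space.
func mfaDelete(ctx context.Context, client *s3.Client) error {
	_, err := client.DeleteObjects(ctx, &s3.DeleteObjectsInput{
		Bucket: aws.String("amzn-s3-demo-bucket"),
		MFA:    aws.String("arn:aws:iam::111122223333:mfa/example-user 123456"),
		Delete: &types.Delete{
			Objects: []types.ObjectIdentifier{
				{Key: aws.String("HappyFace.jpg"), VersionId: aws.String("example-version-id")},
			},
		},
	})
	return err
}
```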

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-mfa" } }, @@ -18923,21 +20814,21 @@ "BypassGovernanceRetention": { "target": "com.amazonaws.s3#BypassGovernanceRetention", "traits": { - "smithy.api#documentation": "

Specifies whether you want to delete this object even if it has a Governance-type Object\n Lock in place. To use this header, you must have the\n s3:BypassGovernanceRetention permission.

", + "smithy.api#documentation": "

Specifies whether you want to delete this object even if it has a Governance-type Object\n Lock in place. To use this header, you must have the\n s3:BypassGovernanceRetention permission.

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-bypass-governance-retention" } }, "ExpectedBucketOwner": { "target": "com.amazonaws.s3#AccountId", "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

", + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } }, "ChecksumAlgorithm": { "target": "com.amazonaws.s3#ChecksumAlgorithm", "traits": { - "smithy.api#documentation": "

Indicates the algorithm used to create the checksum for the object when using the SDK. This header will not provide any\n additional functionality if not using the SDK. When sending this header, there must be a corresponding x-amz-checksum or\n x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For more\n information, see Checking object integrity in\n the Amazon S3 User Guide.

\n

If you provide an individual checksum, Amazon S3 ignores any provided\n ChecksumAlgorithm parameter.

\n

This checksum algorithm must be the same for all parts and it match the checksum value\n supplied in the CreateMultipartUpload request.

", + "smithy.api#documentation": "

Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any\n additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum-algorithm\n or\n x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request.

\n

For the x-amz-checksum-algorithm\n header, replace \n algorithm\n with the supported algorithm from the following list:

\n
    \n
  • \n

    CRC32

    \n
  • \n
  • \n

    CRC32C

    \n
  • \n
  • \n

    SHA1

    \n
  • \n
  • \n

    SHA256

    \n
  • \n
\n

For more\n information, see Checking object integrity in\n the Amazon S3 User Guide.

\n

If the individual checksum value you provide through x-amz-checksum-algorithm\n doesn't match the checksum algorithm you set through x-amz-sdk-checksum-algorithm, Amazon S3 ignores any provided\n ChecksumAlgorithm parameter and uses the checksum algorithm that matches the provided value in x-amz-checksum-algorithm\n .

\n

If you provide an individual checksum, Amazon S3 ignores any provided\n ChecksumAlgorithm parameter.
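A short sketch of selecting the request checksum algorithm through the SDK (the x-amz-sdk-checksum-algorithm header), assuming the imports and client from the earlier DeleteObjects sketch; bucket and key names are placeholders.

```go
// deleteWithChecksum asks the SDK to compute a CRC32 checksum for the request body.
func deleteWithChecksum(ctx context.Context, client *s3.Client) error {
	_, err := client.DeleteObjects(ctx, &s3.DeleteObjectsInput{
		Bucket:            aws.String("amzn-s3-demo-bucket"),
		ChecksumAlgorithm: types.ChecksumAlgorithmCrc32, // or Crc32c, Sha1, Sha256
		Delete: &types.Delete{
			Objects: []types.ObjectIdentifier{{Key: aws.String("HappyFace.jpg")}},
		},
	})
	return err
}
```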

", "smithy.api#httpHeader": "x-amz-sdk-checksum-algorithm" } } @@ -18955,11 +20846,16 @@ "target": "smithy.api#Unit" }, "traits": { - "smithy.api#documentation": "

Removes the PublicAccessBlock configuration for an Amazon S3 bucket. To use this\n operation, you must have the s3:PutBucketPublicAccessBlock permission. For\n more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing\n Access Permissions to Your Amazon S3 Resources.

\n

The following operations are related to DeletePublicAccessBlock:

\n ", + "smithy.api#documentation": "\n

This operation is not supported by directory buckets.

\n
\n

Removes the PublicAccessBlock configuration for an Amazon S3 bucket. To use this\n operation, you must have the s3:PutBucketPublicAccessBlock permission. For\n more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing\n Access Permissions to Your Amazon S3 Resources.
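A minimal sketch of this operation with the aws-sdk-go-v2 client, assuming the imports and client from the earlier DeleteObjects sketch; the bucket name and account ID are placeholders. ExpectedBucketOwner makes the call fail with 403 Forbidden if the bucket belongs to a different account.

```go
// removePublicAccessBlock deletes the bucket's PublicAccessBlock configuration.
func removePublicAccessBlock(ctx context.Context, client *s3.Client) error {
	_, err := client.DeletePublicAccessBlock(ctx, &s3.DeletePublicAccessBlockInput{
		Bucket:              aws.String("amzn-s3-demo-bucket"),
		ExpectedBucketOwner: aws.String("111122223333"),
	})
	return err
}
```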

\n

The following operations are related to DeletePublicAccessBlock:

\n ", "smithy.api#http": { "method": "DELETE", "uri": "/{Bucket}?publicAccessBlock", "code": 204 + }, + "smithy.rules#staticContextParams": { + "UseS3ExpressControlEndpoint": { + "value": true + } } } }, @@ -18980,7 +20876,7 @@ "ExpectedBucketOwner": { "target": "com.amazonaws.s3#AccountId", "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

", + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } } @@ -19001,19 +20897,19 @@ "VersionId": { "target": "com.amazonaws.s3#ObjectVersionId", "traits": { - "smithy.api#documentation": "

The version ID of the deleted object.

" + "smithy.api#documentation": "

The version ID of the deleted object.

\n \n

This functionality is not supported for directory buckets.

\n
" } }, "DeleteMarker": { "target": "com.amazonaws.s3#DeleteMarker", "traits": { - "smithy.api#documentation": "

Indicates whether the specified object version that was permanently deleted was (true) or was\n not (false) a delete marker before deletion. In a simple DELETE, this header indicates whether (true) or\n not (false) the current version of the object is a delete marker.

" + "smithy.api#documentation": "

Indicates whether the specified object version that was permanently deleted was (true) or was\n not (false) a delete marker before deletion. In a simple DELETE, this header indicates whether (true) or\n not (false) the current version of the object is a delete marker.

\n \n

This functionality is not supported for directory buckets.

\n
" } }, "DeleteMarkerVersionId": { "target": "com.amazonaws.s3#DeleteMarkerVersionId", "traits": { - "smithy.api#documentation": "

The version ID of the delete marker created as a result of the DELETE operation. If you\n delete a specific object version, the value returned by this header is the version ID of\n the object version deleted.

" + "smithy.api#documentation": "

The version ID of the delete marker created as a result of the DELETE operation. If you\n delete a specific object version, the value returned by this header is the version ID of\n the object version deleted.

\n \n

This functionality is not supported for directory buckets.

\n
" } } }, @@ -19084,6 +20980,15 @@ "smithy.api#documentation": "

Specifies information about where to publish analysis or configuration results for an\n Amazon S3 bucket and S3 Replication Time Control (S3 RTC).

" } }, + "com.amazonaws.s3#DirectoryBucketToken": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 1024 + } + } + }, "com.amazonaws.s3#DisplayName": { "type": "string" }, @@ -19173,7 +21078,7 @@ "VersionId": { "target": "com.amazonaws.s3#ObjectVersionId", "traits": { - "smithy.api#documentation": "

The version ID of the error.

" + "smithy.api#documentation": "

The version ID of the error.

\n \n

This functionality is not supported for directory buckets.

\n
" } }, "Code": { @@ -19571,11 +21476,16 @@ "target": "com.amazonaws.s3#GetBucketAccelerateConfigurationOutput" }, "traits": { - "smithy.api#documentation": "

This implementation of the GET action uses the accelerate subresource to\n return the Transfer Acceleration state of a bucket, which is either Enabled or\n Suspended. Amazon S3 Transfer Acceleration is a bucket-level feature that\n enables you to perform faster data transfers to and from Amazon S3.

\n

To use this operation, you must have permission to perform the\n s3:GetAccelerateConfiguration action. The bucket owner has this permission\n by default. The bucket owner can grant this permission to others. For more information\n about permissions, see Permissions Related to Bucket Subresource Operations and Managing\n Access Permissions to your Amazon S3 Resources in the\n Amazon S3 User Guide.

\n

You set the Transfer Acceleration state of an existing bucket to Enabled or\n Suspended by using the PutBucketAccelerateConfiguration operation.

\n

A GET accelerate request does not return a state value for a bucket that\n has no transfer acceleration state. A bucket has no Transfer Acceleration state if a state\n has never been set on the bucket.

\n

For more information about transfer acceleration, see Transfer Acceleration in\n the Amazon S3 User Guide.

\n

The following operations are related to\n GetBucketAccelerateConfiguration:

\n ", + "smithy.api#documentation": "\n

This operation is not supported by directory buckets.

\n
\n

This implementation of the GET action uses the accelerate subresource to\n return the Transfer Acceleration state of a bucket, which is either Enabled or\n Suspended. Amazon S3 Transfer Acceleration is a bucket-level feature that\n enables you to perform faster data transfers to and from Amazon S3.

\n

To use this operation, you must have permission to perform the\n s3:GetAccelerateConfiguration action. The bucket owner has this permission\n by default. The bucket owner can grant this permission to others. For more information\n about permissions, see Permissions Related to Bucket Subresource Operations and Managing\n Access Permissions to your Amazon S3 Resources in the\n Amazon S3 User Guide.

\n

You set the Transfer Acceleration state of an existing bucket to Enabled or\n Suspended by using the PutBucketAccelerateConfiguration operation.

\n

A GET accelerate request does not return a state value for a bucket that\n has no transfer acceleration state. A bucket has no Transfer Acceleration state if a state\n has never been set on the bucket.
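A sketch of reading the Transfer Acceleration state and handling the never-configured case, assuming the imports and client from the earlier DeleteObjects sketch; the bucket name is a placeholder.

```go
// accelerateStatus prints the bucket's Transfer Acceleration state, if any.
func accelerateStatus(ctx context.Context, client *s3.Client) error {
	out, err := client.GetBucketAccelerateConfiguration(ctx, &s3.GetBucketAccelerateConfigurationInput{
		Bucket: aws.String("amzn-s3-demo-bucket"),
	})
	if err != nil {
		return err
	}
	switch out.Status {
	case types.BucketAccelerateStatusEnabled, types.BucketAccelerateStatusSuspended:
		fmt.Println("accelerate state:", out.Status)
	default:
		// An empty Status means no Transfer Acceleration state has ever been set.
		fmt.Println("no Transfer Acceleration state set on this bucket")
	}
	return nil
}
```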

\n

For more information about transfer acceleration, see Transfer Acceleration in\n the Amazon S3 User Guide.

\n

The following operations are related to\n GetBucketAccelerateConfiguration:

\n ", "smithy.api#http": { "method": "GET", "uri": "/{Bucket}?accelerate", "code": 200 + }, + "smithy.rules#staticContextParams": { + "UseS3ExpressControlEndpoint": { + "value": true + } } } }, @@ -19617,7 +21527,7 @@ "ExpectedBucketOwner": { "target": "com.amazonaws.s3#AccountId", "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

", + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } }, @@ -19641,11 +21551,16 @@ "target": "com.amazonaws.s3#GetBucketAclOutput" }, "traits": { - "smithy.api#documentation": "

This implementation of the GET action uses the acl subresource\n to return the access control list (ACL) of a bucket. To use GET to return the\n ACL of the bucket, you must have READ_ACP access to the bucket. If\n READ_ACP permission is granted to the anonymous user, you can return the\n ACL of the bucket without using an authorization header.

\n

To use this API operation against an access point, provide the alias of the access point in place of the bucket name.

\n

To use this API operation against an Object Lambda access point, provide the alias of the Object Lambda access point in place of the bucket name. \nIf the Object Lambda access point alias in a request is not valid, the error code InvalidAccessPointAliasError is returned. \nFor more information about InvalidAccessPointAliasError, see List of\n Error Codes.

\n \n

If your bucket uses the bucket owner enforced setting for S3 Object Ownership,\n requests to read ACLs are still supported and return the\n bucket-owner-full-control ACL with the owner being the account that\n created the bucket. For more information, see Controlling object\n ownership and disabling ACLs in the\n Amazon S3 User Guide.

\n
\n

The following operations are related to GetBucketAcl:

\n ", + "smithy.api#documentation": "\n

This operation is not supported by directory buckets.

\n
\n

This implementation of the GET action uses the acl subresource\n to return the access control list (ACL) of a bucket. To use GET to return the\n ACL of the bucket, you must have READ_ACP access to the bucket. If\n READ_ACP permission is granted to the anonymous user, you can return the\n ACL of the bucket without using an authorization header.

\n

When you use this API operation with an access point, provide the alias of the access point in place of the bucket name.

\n

When you use this API operation with an Object Lambda access point, provide the alias of the Object Lambda access point in place of the bucket name. \nIf the Object Lambda access point alias in a request is not valid, the error code InvalidAccessPointAliasError is returned. \nFor more information about InvalidAccessPointAliasError, see List of\n Error Codes.

\n \n

If your bucket uses the bucket owner enforced setting for S3 Object Ownership,\n requests to read ACLs are still supported and return the\n bucket-owner-full-control ACL with the owner being the account that\n created the bucket. For more information, see Controlling object\n ownership and disabling ACLs in the\n Amazon S3 User Guide.

\n
\n

The following operations are related to GetBucketAcl:

\n ", "smithy.api#http": { "method": "GET", "uri": "/{Bucket}?acl", "code": 200 + }, + "smithy.rules#staticContextParams": { + "UseS3ExpressControlEndpoint": { + "value": true + } } } }, @@ -19677,7 +21592,7 @@ "Bucket": { "target": "com.amazonaws.s3#BucketName", "traits": { - "smithy.api#documentation": "

Specifies the S3 bucket whose ACL is being requested.

\n

To use this API operation against an access point, provide the alias of the access point in place of the bucket name.

\n

To use this API operation against an Object Lambda access point, provide the alias of the Object Lambda access point in place of the bucket name. \nIf the Object Lambda access point alias in a request is not valid, the error code InvalidAccessPointAliasError is returned. \nFor more information about InvalidAccessPointAliasError, see List of\n Error Codes.

", + "smithy.api#documentation": "

Specifies the S3 bucket whose ACL is being requested.

\n

When you use this API operation with an access point, provide the alias of the access point in place of the bucket name.

\n

When you use this API operation with an Object Lambda access point, provide the alias of the Object Lambda access point in place of the bucket name. \nIf the Object Lambda access point alias in a request is not valid, the error code InvalidAccessPointAliasError is returned. \nFor more information about InvalidAccessPointAliasError, see List of\n Error Codes.

", "smithy.api#httpLabel": {}, "smithy.api#required": {}, "smithy.rules#contextParam": { @@ -19688,7 +21603,7 @@ "ExpectedBucketOwner": { "target": "com.amazonaws.s3#AccountId", "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

", + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } } @@ -19706,11 +21621,16 @@ "target": "com.amazonaws.s3#GetBucketAnalyticsConfigurationOutput" }, "traits": { - "smithy.api#documentation": "

This implementation of the GET action returns an analytics configuration (identified by\n the analytics configuration ID) from the bucket.

\n

To use this operation, you must have permissions to perform the\n s3:GetAnalyticsConfiguration action. The bucket owner has this permission\n by default. The bucket owner can grant this permission to others. For more information\n about permissions, see Permissions Related to Bucket Subresource Operations and Managing\n Access Permissions to Your Amazon S3 Resources in the\n Amazon S3 User Guide.

\n

For information about Amazon S3 analytics feature, see Amazon S3 Analytics – Storage Class\n Analysis in the Amazon S3 User Guide.

\n

The following operations are related to\n GetBucketAnalyticsConfiguration:

\n ", + "smithy.api#documentation": "\n

This operation is not supported by directory buckets.

\n
\n

This implementation of the GET action returns an analytics configuration (identified by\n the analytics configuration ID) from the bucket.

\n

To use this operation, you must have permissions to perform the\n s3:GetAnalyticsConfiguration action. The bucket owner has this permission\n by default. The bucket owner can grant this permission to others. For more information\n about permissions, see Permissions Related to Bucket Subresource Operations and Managing\n Access Permissions to Your Amazon S3 Resources in the\n Amazon S3 User Guide.

\n

For information about Amazon S3 analytics feature, see Amazon S3 Analytics – Storage Class\n Analysis in the Amazon S3 User Guide.

\n

The following operations are related to\n GetBucketAnalyticsConfiguration:

\n ", "smithy.api#http": { "method": "GET", "uri": "/{Bucket}?analytics&x-id=GetBucketAnalyticsConfiguration", "code": 200 + }, + "smithy.rules#staticContextParams": { + "UseS3ExpressControlEndpoint": { + "value": true + } } } }, @@ -19754,7 +21674,7 @@ "ExpectedBucketOwner": { "target": "com.amazonaws.s3#AccountId", "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

", + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } } @@ -19772,7 +21692,7 @@ "target": "com.amazonaws.s3#GetBucketCorsOutput" }, "traits": { - "smithy.api#documentation": "

Returns the Cross-Origin Resource Sharing (CORS) configuration information set for the\n bucket.

\n

To use this operation, you must have permission to perform the\n s3:GetBucketCORS action. By default, the bucket owner has this permission\n and can grant it to others.

\n

To use this API operation against an access point, provide the alias of the access point in place of the bucket name.

\n

To use this API operation against an Object Lambda access point, provide the alias of the Object Lambda access point in place of the bucket name. \nIf the Object Lambda access point alias in a request is not valid, the error code InvalidAccessPointAliasError is returned. \nFor more information about InvalidAccessPointAliasError, see List of\n Error Codes.

\n

For more information about CORS, see Enabling Cross-Origin Resource\n Sharing.

\n

The following operations are related to GetBucketCors:

\n ", + "smithy.api#documentation": "\n

This operation is not supported by directory buckets.

\n
\n

Returns the Cross-Origin Resource Sharing (CORS) configuration information set for the\n bucket.

\n

To use this operation, you must have permission to perform the\n s3:GetBucketCORS action. By default, the bucket owner has this permission\n and can grant it to others.

\n

When you use this API operation with an access point, provide the alias of the access point in place of the bucket name.

\n

When you use this API operation with an Object Lambda access point, provide the alias of the Object Lambda access point in place of the bucket name. \nIf the Object Lambda access point alias in a request is not valid, the error code InvalidAccessPointAliasError is returned. \nFor more information about InvalidAccessPointAliasError, see List of\n Error Codes.

\n

For more information about CORS, see Enabling Cross-Origin Resource\n Sharing.

\n

The following operations are related to GetBucketCors:

\n ", "smithy.api#examples": [ { "title": "To get cors configuration set on a bucket", @@ -19802,6 +21722,11 @@ "method": "GET", "uri": "/{Bucket}?cors", "code": 200 + }, + "smithy.rules#staticContextParams": { + "UseS3ExpressControlEndpoint": { + "value": true + } } } }, @@ -19828,7 +21753,7 @@ "Bucket": { "target": "com.amazonaws.s3#BucketName", "traits": { - "smithy.api#documentation": "

The bucket name for which to get the cors configuration.

\n

To use this API operation against an access point, provide the alias of the access point in place of the bucket name.

\n

To use this API operation against an Object Lambda access point, provide the alias of the Object Lambda access point in place of the bucket name. \nIf the Object Lambda access point alias in a request is not valid, the error code InvalidAccessPointAliasError is returned. \nFor more information about InvalidAccessPointAliasError, see List of\n Error Codes.

", + "smithy.api#documentation": "

The bucket name for which to get the cors configuration.

\n

When you use this API operation with an access point, provide the alias of the access point in place of the bucket name.

\n

When you use this API operation with an Object Lambda access point, provide the alias of the Object Lambda access point in place of the bucket name. \nIf the Object Lambda access point alias in a request is not valid, the error code InvalidAccessPointAliasError is returned. \nFor more information about InvalidAccessPointAliasError, see List of\n Error Codes.

", "smithy.api#httpLabel": {}, "smithy.api#required": {}, "smithy.rules#contextParam": { @@ -19839,7 +21764,7 @@ "ExpectedBucketOwner": { "target": "com.amazonaws.s3#AccountId", "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

", + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } } @@ -19857,11 +21782,16 @@ "target": "com.amazonaws.s3#GetBucketEncryptionOutput" }, "traits": { - "smithy.api#documentation": "

Returns the default encryption configuration for an Amazon S3 bucket. By default, all buckets\n have a default encryption configuration that uses server-side encryption with Amazon S3 managed\n keys (SSE-S3). For information about the bucket default encryption feature, see Amazon S3 Bucket\n Default Encryption in the Amazon S3 User Guide.

\n

To use this operation, you must have permission to perform the\n s3:GetEncryptionConfiguration action. The bucket owner has this permission\n by default. The bucket owner can grant this permission to others. For more information\n about permissions, see Permissions Related to Bucket Subresource Operations and Managing\n Access Permissions to Your Amazon S3 Resources.

\n

The following operations are related to GetBucketEncryption:

\n ", + "smithy.api#documentation": "\n

This operation is not supported by directory buckets.

\n
\n

Returns the default encryption configuration for an Amazon S3 bucket. By default, all buckets\n have a default encryption configuration that uses server-side encryption with Amazon S3 managed\n keys (SSE-S3). For information about the bucket default encryption feature, see Amazon S3 Bucket\n Default Encryption in the Amazon S3 User Guide.
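A sketch of reading the default encryption rules with the aws-sdk-go-v2 client, assuming the imports and client from the earlier DeleteObjects sketch; the bucket name is a placeholder.

```go
// defaultEncryption prints each rule's default server-side encryption algorithm.
func defaultEncryption(ctx context.Context, client *s3.Client) error {
	out, err := client.GetBucketEncryption(ctx, &s3.GetBucketEncryptionInput{
		Bucket: aws.String("amzn-s3-demo-bucket"),
	})
	if err != nil {
		return err
	}
	if out.ServerSideEncryptionConfiguration == nil {
		return nil
	}
	for _, rule := range out.ServerSideEncryptionConfiguration.Rules {
		if d := rule.ApplyServerSideEncryptionByDefault; d != nil {
			// SSEAlgorithm is AES256 for SSE-S3 or aws:kms for SSE-KMS.
			fmt.Println("default algorithm:", d.SSEAlgorithm)
		}
	}
	return nil
}
```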

\n

To use this operation, you must have permission to perform the\n s3:GetEncryptionConfiguration action. The bucket owner has this permission\n by default. The bucket owner can grant this permission to others. For more information\n about permissions, see Permissions Related to Bucket Subresource Operations and Managing\n Access Permissions to Your Amazon S3 Resources.

\n

The following operations are related to GetBucketEncryption:

\n ", "smithy.api#http": { "method": "GET", "uri": "/{Bucket}?encryption", "code": 200 + }, + "smithy.rules#staticContextParams": { + "UseS3ExpressControlEndpoint": { + "value": true + } } } }, @@ -19896,7 +21826,7 @@ "ExpectedBucketOwner": { "target": "com.amazonaws.s3#AccountId", "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

", + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } } @@ -19914,11 +21844,16 @@ "target": "com.amazonaws.s3#GetBucketIntelligentTieringConfigurationOutput" }, "traits": { - "smithy.api#documentation": "

Gets the S3 Intelligent-Tiering configuration from the specified bucket.

\n

The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without performance impact or operational overhead. S3 Intelligent-Tiering delivers automatic cost savings in three low latency and high throughput access tiers. To get the lowest storage cost on data that can be accessed in minutes to hours, you can choose to activate additional archiving capabilities.

\n

The S3 Intelligent-Tiering storage class is the ideal storage class for data with unknown, changing, or unpredictable access patterns, independent of object size or retention period. If the size of an object is less than 128 KB, it is not monitored and not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the Frequent Access tier rates in the S3 Intelligent-Tiering storage class.

\n

For more information, see Storage class for automatically optimizing frequently and infrequently accessed objects.

\n

Operations related to GetBucketIntelligentTieringConfiguration include:

\n ", + "smithy.api#documentation": "\n

This operation is not supported by directory buckets.

\n
\n

Gets the S3 Intelligent-Tiering configuration from the specified bucket.

\n

The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without performance impact or operational overhead. S3 Intelligent-Tiering delivers automatic cost savings in three low latency and high throughput access tiers. To get the lowest storage cost on data that can be accessed in minutes to hours, you can choose to activate additional archiving capabilities.

\n

The S3 Intelligent-Tiering storage class is the ideal storage class for data with unknown, changing, or unpredictable access patterns, independent of object size or retention period. If the size of an object is less than 128 KB, it is not monitored and not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the Frequent Access tier rates in the S3 Intelligent-Tiering storage class.

\n

For more information, see Storage class for automatically optimizing frequently and infrequently accessed objects.

\n

Operations related to GetBucketIntelligentTieringConfiguration include:

\n ", "smithy.api#http": { "method": "GET", "uri": "/{Bucket}?intelligent-tiering&x-id=GetBucketIntelligentTieringConfiguration", "code": 200 + }, + "smithy.rules#staticContextParams": { + "UseS3ExpressControlEndpoint": { + "value": true + } } } }, @@ -19973,11 +21908,16 @@ "target": "com.amazonaws.s3#GetBucketInventoryConfigurationOutput" }, "traits": { - "smithy.api#documentation": "

Returns an inventory configuration (identified by the inventory configuration ID) from\n the bucket.

\n

To use this operation, you must have permissions to perform the\n s3:GetInventoryConfiguration action. The bucket owner has this permission\n by default and can grant this permission to others. For more information about permissions,\n see Permissions Related to Bucket Subresource Operations and Managing\n Access Permissions to Your Amazon S3 Resources.

\n

For information about the Amazon S3 inventory feature, see Amazon S3 Inventory.

\n

The following operations are related to\n GetBucketInventoryConfiguration:

\n ", + "smithy.api#documentation": "\n

This operation is not supported by directory buckets.

\n
\n

Returns an inventory configuration (identified by the inventory configuration ID) from\n the bucket.

\n

To use this operation, you must have permissions to perform the\n s3:GetInventoryConfiguration action. The bucket owner has this permission\n by default and can grant this permission to others. For more information about permissions,\n see Permissions Related to Bucket Subresource Operations and Managing\n Access Permissions to Your Amazon S3 Resources.

\n

For information about the Amazon S3 inventory feature, see Amazon S3 Inventory.

\n

The following operations are related to\n GetBucketInventoryConfiguration:

\n ", "smithy.api#http": { "method": "GET", "uri": "/{Bucket}?inventory&x-id=GetBucketInventoryConfiguration", "code": 200 + }, + "smithy.rules#staticContextParams": { + "UseS3ExpressControlEndpoint": { + "value": true + } } } }, @@ -20021,7 +21961,7 @@ "ExpectedBucketOwner": { "target": "com.amazonaws.s3#AccountId", "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

", + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } } @@ -20039,7 +21979,7 @@ "target": "com.amazonaws.s3#GetBucketLifecycleConfigurationOutput" }, "traits": { - "smithy.api#documentation": "\n

Bucket lifecycle configuration now supports specifying a lifecycle rule using an\n object key name prefix, one or more object tags, or a combination of both. Accordingly,\n this section describes the latest API. The response describes the new filter element\n that you can use to specify a filter to select a subset of objects to which the rule\n applies. If you are using a previous version of the lifecycle configuration, it still\n works. For the earlier action, see GetBucketLifecycle.

\n
\n

Returns the lifecycle configuration information set on the bucket. For information about\n lifecycle configuration, see Object Lifecycle\n Management.

\n

To use this operation, you must have permission to perform the\n s3:GetLifecycleConfiguration action. The bucket owner has this permission,\n by default. The bucket owner can grant this permission to others. For more information\n about permissions, see Permissions Related to Bucket Subresource Operations and Managing\n Access Permissions to Your Amazon S3 Resources.

\n

\n GetBucketLifecycleConfiguration has the following special error:

\n
    \n
  • \n

    Error code: NoSuchLifecycleConfiguration\n

    \n
      \n
    • \n

      Description: The lifecycle configuration does not exist.

      \n
    • \n
    • \n

      HTTP Status Code: 404 Not Found

      \n
    • \n
    • \n

      SOAP Fault Code Prefix: Client

      \n
    • \n
    \n
  • \n
\n

The following operations are related to\n GetBucketLifecycleConfiguration:

\n ", + "smithy.api#documentation": "\n

This operation is not supported by directory buckets.

\n
\n \n

Bucket lifecycle configuration now supports specifying a lifecycle rule using an\n object key name prefix, one or more object tags, or a combination of both. Accordingly,\n this section describes the latest API. The response describes the new filter element\n that you can use to specify a filter to select a subset of objects to which the rule\n applies. If you are using a previous version of the lifecycle configuration, it still\n works. For the earlier action, see GetBucketLifecycle.

\n
\n

Returns the lifecycle configuration information set on the bucket. For information about\n lifecycle configuration, see Object Lifecycle\n Management.

\n

To use this operation, you must have permission to perform the\n s3:GetLifecycleConfiguration action. The bucket owner has this permission,\n by default. The bucket owner can grant this permission to others. For more information\n about permissions, see Permissions Related to Bucket Subresource Operations and Managing\n Access Permissions to Your Amazon S3 Resources.

\n

\n GetBucketLifecycleConfiguration has the following special error:

\n
    \n
  • \n

    Error code: NoSuchLifecycleConfiguration\n

    \n
      \n
    • \n

      Description: The lifecycle configuration does not exist.

      \n
    • \n
    • \n

      HTTP Status Code: 404 Not Found

      \n
    • \n
    • \n

      SOAP Fault Code Prefix: Client

      \n
    • \n
    \n
  • \n
\n

The following operations are related to\n GetBucketLifecycleConfiguration:

\n ", "smithy.api#examples": [ { "title": "To get lifecycle configuration on a bucket", @@ -20068,6 +22008,11 @@ "method": "GET", "uri": "/{Bucket}?lifecycle", "code": 200 + }, + "smithy.rules#staticContextParams": { + "UseS3ExpressControlEndpoint": { + "value": true + } } } }, @@ -20105,7 +22050,7 @@ "ExpectedBucketOwner": { "target": "com.amazonaws.s3#AccountId", "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

", + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } } @@ -20124,7 +22069,7 @@ }, "traits": { "aws.customizations#s3UnwrappedXmlOutput": {}, - "smithy.api#documentation": "

Returns the Region the bucket resides in. You set the bucket's Region using the\n LocationConstraint request parameter in a CreateBucket\n request. For more information, see CreateBucket.

\n

To use this API operation against an access point, provide the alias of the access point in place of the bucket name.

\n

To use this API operation against an Object Lambda access point, provide the alias of the Object Lambda access point in place of the bucket name. \nIf the Object Lambda access point alias in a request is not valid, the error code InvalidAccessPointAliasError is returned. \nFor more information about InvalidAccessPointAliasError, see List of\n Error Codes.

\n \n

We recommend that you use HeadBucket to return the Region\n that a bucket resides in. For backward compatibility, Amazon S3 continues to support\n GetBucketLocation.

\n
\n

The following operations are related to GetBucketLocation:

\n ", + "smithy.api#documentation": "\n

This operation is not supported by directory buckets.

\n
\n

Returns the Region the bucket resides in. You set the bucket's Region using the\n LocationConstraint request parameter in a CreateBucket\n request. For more information, see CreateBucket.
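A sketch of reading the bucket Region, assuming the imports and client from the earlier DeleteObjects sketch; the bucket name is a placeholder. Buckets in us-east-1 return an empty LocationConstraint.

```go
// bucketRegion returns the Region reported by GetBucketLocation.
func bucketRegion(ctx context.Context, client *s3.Client) (string, error) {
	out, err := client.GetBucketLocation(ctx, &s3.GetBucketLocationInput{
		Bucket: aws.String("amzn-s3-demo-bucket"),
	})
	if err != nil {
		return "", err
	}
	region := string(out.LocationConstraint)
	if region == "" {
		region = "us-east-1" // legacy behavior: an empty constraint means us-east-1
	}
	return region, nil
}
```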

\n

When you use this API operation with an access point, provide the alias of the access point in place of the bucket name.

\n

When you use this API operation with an Object Lambda access point, provide the alias of the Object Lambda access point in place of the bucket name. \nIf the Object Lambda access point alias in a request is not valid, the error code InvalidAccessPointAliasError is returned. \nFor more information about InvalidAccessPointAliasError, see List of\n Error Codes.

\n \n

We recommend that you use HeadBucket to return the Region\n that a bucket resides in. For backward compatibility, Amazon S3 continues to support\n GetBucketLocation.

\n
\n

The following operations are related to GetBucketLocation:

\n ", "smithy.api#examples": [ { "title": "To get bucket location", @@ -20141,6 +22086,11 @@ "method": "GET", "uri": "/{Bucket}?location", "code": 200 + }, + "smithy.rules#staticContextParams": { + "UseS3ExpressControlEndpoint": { + "value": true + } } } }, @@ -20165,7 +22115,7 @@ "Bucket": { "target": "com.amazonaws.s3#BucketName", "traits": { - "smithy.api#documentation": "

The name of the bucket for which to get the location.

\n

To use this API operation against an access point, provide the alias of the access point in place of the bucket name.

\n

To use this API operation against an Object Lambda access point, provide the alias of the Object Lambda access point in place of the bucket name. \nIf the Object Lambda access point alias in a request is not valid, the error code InvalidAccessPointAliasError is returned. \nFor more information about InvalidAccessPointAliasError, see List of\n Error Codes.

", + "smithy.api#documentation": "

The name of the bucket for which to get the location.

\n

When you use this API operation with an access point, provide the alias of the access point in place of the bucket name.

\n

When you use this API operation with an Object Lambda access point, provide the alias of the Object Lambda access point in place of the bucket name. \nIf the Object Lambda access point alias in a request is not valid, the error code InvalidAccessPointAliasError is returned. \nFor more information about InvalidAccessPointAliasError, see List of\n Error Codes.

", "smithy.api#httpLabel": {}, "smithy.api#required": {}, "smithy.rules#contextParam": { @@ -20176,7 +22126,7 @@ "ExpectedBucketOwner": { "target": "com.amazonaws.s3#AccountId", "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

", + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } } @@ -20194,11 +22144,16 @@ "target": "com.amazonaws.s3#GetBucketLoggingOutput" }, "traits": { - "smithy.api#documentation": "

Returns the logging status of a bucket and the permissions users have to view and modify\n that status.

\n

The following operations are related to GetBucketLogging:

\n ", + "smithy.api#documentation": "\n

This operation is not supported by directory buckets.

\n
\n

Returns the logging status of a bucket and the permissions users have to view and modify\n that status.

\n

The following operations are related to GetBucketLogging:

\n ", "smithy.api#http": { "method": "GET", "uri": "/{Bucket}?logging", "code": 200 + }, + "smithy.rules#staticContextParams": { + "UseS3ExpressControlEndpoint": { + "value": true + } } } }, @@ -20231,7 +22186,7 @@ "ExpectedBucketOwner": { "target": "com.amazonaws.s3#AccountId", "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

", + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } } @@ -20249,11 +22204,16 @@ "target": "com.amazonaws.s3#GetBucketMetricsConfigurationOutput" }, "traits": { - "smithy.api#documentation": "

Gets a metrics configuration (specified by the metrics configuration ID) from the\n bucket. Note that this doesn't include the daily storage metrics.

\n

To use this operation, you must have permissions to perform the\n s3:GetMetricsConfiguration action. The bucket owner has this permission by\n default. The bucket owner can grant this permission to others. For more information about\n permissions, see Permissions Related to Bucket Subresource Operations and Managing\n Access Permissions to Your Amazon S3 Resources.

\n

For information about CloudWatch request metrics for Amazon S3, see Monitoring\n Metrics with Amazon CloudWatch.

\n

The following operations are related to\n GetBucketMetricsConfiguration:

\n ", + "smithy.api#documentation": "\n

This operation is not supported by directory buckets.

\n
\n

Gets a metrics configuration (specified by the metrics configuration ID) from the\n bucket. Note that this doesn't include the daily storage metrics.

\n

To use this operation, you must have permissions to perform the\n s3:GetMetricsConfiguration action. The bucket owner has this permission by\n default. The bucket owner can grant this permission to others. For more information about\n permissions, see Permissions Related to Bucket Subresource Operations and Managing\n Access Permissions to Your Amazon S3 Resources.

\n

For information about CloudWatch request metrics for Amazon S3, see Monitoring\n Metrics with Amazon CloudWatch.

\n

The following operations are related to\n GetBucketMetricsConfiguration:

\n ", "smithy.api#http": { "method": "GET", "uri": "/{Bucket}?metrics&x-id=GetBucketMetricsConfiguration", "code": 200 + }, + "smithy.rules#staticContextParams": { + "UseS3ExpressControlEndpoint": { + "value": true + } } } }, @@ -20297,7 +22257,7 @@ "ExpectedBucketOwner": { "target": "com.amazonaws.s3#AccountId", "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

", + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } } @@ -20315,11 +22275,16 @@ "target": "com.amazonaws.s3#NotificationConfiguration" }, "traits": { - "smithy.api#documentation": "

Returns the notification configuration of a bucket.

\n

If notifications are not enabled on the bucket, the action returns an empty\n NotificationConfiguration element.

\n

By default, you must be the bucket owner to read the notification configuration of a\n bucket. However, the bucket owner can use a bucket policy to grant permission to other\n users to read this configuration with the s3:GetBucketNotification\n permission.

\n

To use this API operation against an access point, provide the alias of the access point in place of the bucket name.

\n

To use this API operation against an Object Lambda access point, provide the alias of the Object Lambda access point in place of the bucket name. \nIf the Object Lambda access point alias in a request is not valid, the error code InvalidAccessPointAliasError is returned. \nFor more information about InvalidAccessPointAliasError, see List of\n Error Codes.

\n

For more information about setting and reading the notification configuration on a\n bucket, see Setting Up Notification of Bucket Events. For more information about bucket\n policies, see Using Bucket Policies.

\n

The following action is related to GetBucketNotification:

\n ", + "smithy.api#documentation": "\n

This operation is not supported by directory buckets.

\n
\n

Returns the notification configuration of a bucket.

\n

If notifications are not enabled on the bucket, the action returns an empty\n NotificationConfiguration element.

\n

By default, you must be the bucket owner to read the notification configuration of a\n bucket. However, the bucket owner can use a bucket policy to grant permission to other\n users to read this configuration with the s3:GetBucketNotification\n permission.

\n

When you use this API operation with an access point, provide the alias of the access point in place of the bucket name.

\n

When you use this API operation with an Object Lambda access point, provide the alias of the Object Lambda access point in place of the bucket name. \nIf the Object Lambda access point alias in a request is not valid, the error code InvalidAccessPointAliasError is returned. \nFor more information about InvalidAccessPointAliasError, see List of\n Error Codes.

\n

For more information about setting and reading the notification configuration on a\n bucket, see Setting Up Notification of Bucket Events. For more information about bucket\n policies, see Using Bucket Policies.

\n

The following action is related to GetBucketNotification:

\n ", "smithy.api#http": { "method": "GET", "uri": "/{Bucket}?notification", "code": 200 + }, + "smithy.rules#staticContextParams": { + "UseS3ExpressControlEndpoint": { + "value": true + } } } }, @@ -20329,7 +22294,7 @@ "Bucket": { "target": "com.amazonaws.s3#BucketName", "traits": { - "smithy.api#documentation": "

The name of the bucket for which to get the notification configuration.

\n

To use this API operation against an access point, provide the alias of the access point in place of the bucket name.

\n

To use this API operation against an Object Lambda access point, provide the alias of the Object Lambda access point in place of the bucket name. \nIf the Object Lambda access point alias in a request is not valid, the error code InvalidAccessPointAliasError is returned. \nFor more information about InvalidAccessPointAliasError, see List of\n Error Codes.

", + "smithy.api#documentation": "

The name of the bucket for which to get the notification configuration.

\n

When you use this API operation with an access point, provide the alias of the access point in place of the bucket name.

\n

When you use this API operation with an Object Lambda access point, provide the alias of the Object Lambda access point in place of the bucket name. \nIf the Object Lambda access point alias in a request is not valid, the error code InvalidAccessPointAliasError is returned. \nFor more information about InvalidAccessPointAliasError, see List of\n Error Codes.

", "smithy.api#httpLabel": {}, "smithy.api#required": {}, "smithy.rules#contextParam": { @@ -20340,7 +22305,7 @@ "ExpectedBucketOwner": { "target": "com.amazonaws.s3#AccountId", "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

", + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } } @@ -20358,11 +22323,16 @@ "target": "com.amazonaws.s3#GetBucketOwnershipControlsOutput" }, "traits": { - "smithy.api#documentation": "

Retrieves OwnershipControls for an Amazon S3 bucket. To use this operation, you\n must have the s3:GetBucketOwnershipControls permission. For more information\n about Amazon S3 permissions, see Specifying permissions in a\n policy.

\n

For information about Amazon S3 Object Ownership, see Using Object\n Ownership.

\n

The following operations are related to GetBucketOwnershipControls:

\n ", + "smithy.api#documentation": "\n

This operation is not supported by directory buckets.

\n
\n

Retrieves OwnershipControls for an Amazon S3 bucket. To use this operation, you\n must have the s3:GetBucketOwnershipControls permission. For more information\n about Amazon S3 permissions, see Specifying permissions in a\n policy.

\n

For information about Amazon S3 Object Ownership, see Using Object\n Ownership.

\n

The following operations are related to GetBucketOwnershipControls:

\n ", "smithy.api#http": { "method": "GET", "uri": "/{Bucket}?ownershipControls", "code": 200 + }, + "smithy.rules#staticContextParams": { + "UseS3ExpressControlEndpoint": { + "value": true + } } } }, @@ -20398,7 +22368,7 @@ "ExpectedBucketOwner": { "target": "com.amazonaws.s3#AccountId", "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

", + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } } @@ -20416,7 +22386,7 @@ "target": "com.amazonaws.s3#GetBucketPolicyOutput" }, "traits": { - "smithy.api#documentation": "

Returns the policy of a specified bucket. If you are using an identity other than the\n root user of the Amazon Web Services account that owns the bucket, the calling identity must have the\n GetBucketPolicy permissions on the specified bucket and belong to the\n bucket owner's account in order to use this operation.

\n

If you don't have GetBucketPolicy permissions, Amazon S3 returns a 403\n Access Denied error. If you have the correct permissions, but you're not using an\n identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not\n Allowed error.

\n \n

To ensure that bucket owners don't inadvertently lock themselves out of their own\n buckets, the root principal in a bucket owner's Amazon Web Services account can perform the\n GetBucketPolicy, PutBucketPolicy, and\n DeleteBucketPolicy API actions, even if their bucket policy explicitly\n denies the root principal's access. Bucket owner root principals can only be blocked\n from performing these API actions by VPC endpoint policies and Amazon Web Services Organizations\n policies.

\n
\n

To use this API operation against an access point, provide the alias of the access point in place of the bucket name.

\n

To use this API operation against an Object Lambda access point, provide the alias of the Object Lambda access point in place of the bucket name. \nIf the Object Lambda access point alias in a request is not valid, the error code InvalidAccessPointAliasError is returned. \nFor more information about InvalidAccessPointAliasError, see List of\n Error Codes.

\n

For more information about bucket policies, see Using Bucket Policies and User\n Policies.

\n

The following action is related to GetBucketPolicy:

\n ", + "smithy.api#documentation": "

Returns the policy of a specified bucket.
Directory buckets - For directory buckets, you must make requests for this API operation to the Regional endpoint. These endpoints support path-style requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name. Virtual-hosted-style requests aren't supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.
Permissions
If you are using an identity other than the root user of the Amazon Web Services account that owns the bucket, the calling identity must both have the GetBucketPolicy permissions on the specified bucket and belong to the bucket owner's account in order to use this operation.
If you don't have GetBucketPolicy permissions, Amazon S3 returns a 403 Access Denied error. If you have the correct permissions, but you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not Allowed error.
To ensure that bucket owners don't inadvertently lock themselves out of their own buckets, the root principal in a bucket owner's Amazon Web Services account can perform the GetBucketPolicy, PutBucketPolicy, and DeleteBucketPolicy API actions, even if their bucket policy explicitly denies the root principal's access. Bucket owner root principals can only be blocked from performing these API actions by VPC endpoint policies and Amazon Web Services Organizations policies.
• General purpose bucket permissions - The s3:GetBucketPolicy permission is required in a policy. For more information about general purpose buckets bucket policies, see Using Bucket Policies and User Policies in the Amazon S3 User Guide.
• Directory bucket permissions - To grant access to this API operation, you must have the s3express:GetBucketPolicy permission in an IAM identity-based policy instead of a bucket policy. Cross-account access to this API operation isn't supported. This operation can only be performed by the Amazon Web Services account that owns the resource. For more information about directory bucket policies and permissions, see Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone in the Amazon S3 User Guide.
Example bucket policies
General purpose buckets example bucket policies - See Bucket policy examples in the Amazon S3 User Guide.
Directory bucket example bucket policies - See Example bucket policies for S3 Express One Zone in the Amazon S3 User Guide.
HTTP Host header syntax
Directory buckets - The HTTP Host header syntax is s3express-control.region.amazonaws.com.
The following action is related to GetBucketPolicy:
\n ", "smithy.api#examples": [ { "title": "To get bucket policy", @@ -20433,6 +22403,11 @@ "method": "GET", "uri": "/{Bucket}?policy", "code": 200 + }, + "smithy.rules#staticContextParams": { + "UseS3ExpressControlEndpoint": { + "value": true + } } } }, @@ -20457,7 +22432,7 @@ "Bucket": { "target": "com.amazonaws.s3#BucketName", "traits": { - "smithy.api#documentation": "

The bucket name for which to get the bucket policy.

\n

To use this API operation against an access point, provide the alias of the access point in place of the bucket name.

\n

To use this API operation against an Object Lambda access point, provide the alias of the Object Lambda access point in place of the bucket name. \nIf the Object Lambda access point alias in a request is not valid, the error code InvalidAccessPointAliasError is returned. \nFor more information about InvalidAccessPointAliasError, see List of\n Error Codes.

", + "smithy.api#documentation": "

The bucket name to get the bucket policy for.
Directory buckets - When you use this operation with a directory bucket, you must use path-style requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name. Virtual-hosted-style requests aren't supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must also follow the format bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide.
Access points - When you use this API operation with an access point, provide the alias of the access point in place of the bucket name.
Object Lambda access points - When you use this API operation with an Object Lambda access point, provide the alias of the Object Lambda access point in place of the bucket name. If the Object Lambda access point alias in a request is not valid, the error code InvalidAccessPointAliasError is returned. For more information about InvalidAccessPointAliasError, see List of Error Codes.
Access points and Object Lambda access points are not supported by directory buckets.
", "smithy.api#httpLabel": {}, "smithy.api#required": {}, "smithy.rules#contextParam": { @@ -20468,7 +22443,7 @@ "ExpectedBucketOwner": { "target": "com.amazonaws.s3#AccountId", "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

", + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).
For directory buckets, this header is not supported in this API operation. If you specify this header, the request fails with the HTTP status code 501 Not Implemented.
", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } } @@ -20486,11 +22461,16 @@ "target": "com.amazonaws.s3#GetBucketPolicyStatusOutput" }, "traits": { - "smithy.api#documentation": "

Retrieves the policy status for an Amazon S3 bucket, indicating whether the bucket is public.\n In order to use this operation, you must have the s3:GetBucketPolicyStatus\n permission. For more information about Amazon S3 permissions, see Specifying Permissions in a\n Policy.

\n

For more information about when Amazon S3 considers a bucket public, see The Meaning of \"Public\".

\n

The following operations are related to GetBucketPolicyStatus:

\n ", + "smithy.api#documentation": "\n

This operation is not supported by directory buckets.
Retrieves the policy status for an Amazon S3 bucket, indicating whether the bucket is public. In order to use this operation, you must have the s3:GetBucketPolicyStatus permission. For more information about Amazon S3 permissions, see Specifying Permissions in a Policy.
For more information about when Amazon S3 considers a bucket public, see The Meaning of \"Public\".
The following operations are related to GetBucketPolicyStatus:
\n ", "smithy.api#http": { "method": "GET", "uri": "/{Bucket}?policyStatus", "code": 200 + }, + "smithy.rules#staticContextParams": { + "UseS3ExpressControlEndpoint": { + "value": true + } } } }, @@ -20526,7 +22506,7 @@ "ExpectedBucketOwner": { "target": "com.amazonaws.s3#AccountId", "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

", + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } } @@ -20544,7 +22524,7 @@ "target": "com.amazonaws.s3#GetBucketReplicationOutput" }, "traits": { - "smithy.api#documentation": "

Returns the replication configuration of a bucket.

\n \n

It can take a while to propagate the put or delete a replication configuration to\n all Amazon S3 systems. Therefore, a get request soon after put or delete can return a wrong\n result.

\n
\n

For information about replication configuration, see Replication in the\n Amazon S3 User Guide.

\n

This action requires permissions for the s3:GetReplicationConfiguration\n action. For more information about permissions, see Using Bucket Policies and User\n Policies.

\n

If you include the Filter element in a replication configuration, you must\n also include the DeleteMarkerReplication and Priority elements.\n The response also returns those elements.

\n

For information about GetBucketReplication errors, see List of\n replication-related error codes\n

\n

The following operations are related to GetBucketReplication:

\n ", + "smithy.api#documentation": "\n

This operation is not supported by directory buckets.
Returns the replication configuration of a bucket.
It can take a while for a put or delete of a replication configuration to propagate to all Amazon S3 systems. Therefore, a get request soon after a put or delete can return a wrong result.
For information about replication configuration, see Replication in the Amazon S3 User Guide.
This action requires permissions for the s3:GetReplicationConfiguration action. For more information about permissions, see Using Bucket Policies and User Policies.
If you include the Filter element in a replication configuration, you must also include the DeleteMarkerReplication and Priority elements. The response also returns those elements.
For information about GetBucketReplication errors, see List of replication-related error codes.
The following operations are related to GetBucketReplication:
\n ", "smithy.api#examples": [ { "title": "To get replication configuration set on a bucket", @@ -20573,6 +22553,11 @@ "method": "GET", "uri": "/{Bucket}?replication", "code": 200 + }, + "smithy.rules#staticContextParams": { + "UseS3ExpressControlEndpoint": { + "value": true + } } } }, @@ -20607,7 +22592,7 @@ "ExpectedBucketOwner": { "target": "com.amazonaws.s3#AccountId", "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

", + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } } @@ -20625,7 +22610,7 @@ "target": "com.amazonaws.s3#GetBucketRequestPaymentOutput" }, "traits": { - "smithy.api#documentation": "

Returns the request payment configuration of a bucket. To use this version of the\n operation, you must be the bucket owner. For more information, see Requester Pays\n Buckets.

\n

The following operations are related to GetBucketRequestPayment:

\n ", + "smithy.api#documentation": "\n

This operation is not supported by directory buckets.
Returns the request payment configuration of a bucket. To use this version of the operation, you must be the bucket owner. For more information, see Requester Pays Buckets.
The following operations are related to GetBucketRequestPayment:
\n ", "smithy.api#examples": [ { "title": "To get bucket versioning configuration", @@ -20642,6 +22627,11 @@ "method": "GET", "uri": "/{Bucket}?requestPayment", "code": 200 + }, + "smithy.rules#staticContextParams": { + "UseS3ExpressControlEndpoint": { + "value": true + } } } }, @@ -20677,7 +22667,7 @@ "ExpectedBucketOwner": { "target": "com.amazonaws.s3#AccountId", "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

", + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } } @@ -20695,7 +22685,7 @@ "target": "com.amazonaws.s3#GetBucketTaggingOutput" }, "traits": { - "smithy.api#documentation": "

Returns the tag set associated with the bucket.

\n

To use this operation, you must have permission to perform the\n s3:GetBucketTagging action. By default, the bucket owner has this\n permission and can grant this permission to others.

\n

\n GetBucketTagging has the following special error:

\n
    \n
  • \n

    Error code: NoSuchTagSet\n

    \n
      \n
    • \n

      Description: There is no tag set associated with the bucket.

      \n
    • \n
    \n
  • \n
\n

The following operations are related to GetBucketTagging:

\n ", + "smithy.api#documentation": "\n

This operation is not supported by directory buckets.
Returns the tag set associated with the bucket.
To use this operation, you must have permission to perform the s3:GetBucketTagging action. By default, the bucket owner has this permission and can grant this permission to others.
GetBucketTagging has the following special error:
• Error code: NoSuchTagSet
  • Description: There is no tag set associated with the bucket.
The following operations are related to GetBucketTagging:
\n ", "smithy.api#examples": [ { "title": "To get tag set associated with a bucket", @@ -20721,6 +22711,11 @@ "method": "GET", "uri": "/{Bucket}?tagging", "code": 200 + }, + "smithy.rules#staticContextParams": { + "UseS3ExpressControlEndpoint": { + "value": true + } } } }, @@ -20757,7 +22752,7 @@ "ExpectedBucketOwner": { "target": "com.amazonaws.s3#AccountId", "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

", + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } } @@ -20775,7 +22770,7 @@ "target": "com.amazonaws.s3#GetBucketVersioningOutput" }, "traits": { - "smithy.api#documentation": "

Returns the versioning state of a bucket.

\n

To retrieve the versioning state of a bucket, you must be the bucket owner.

\n

This implementation also returns the MFA Delete status of the versioning state. If the\n MFA Delete status is enabled, the bucket owner must use an authentication\n device to change the versioning state of the bucket.

\n

The following operations are related to GetBucketVersioning:

\n ", + "smithy.api#documentation": "\n

This operation is not supported by directory buckets.
Returns the versioning state of a bucket.
To retrieve the versioning state of a bucket, you must be the bucket owner.
This implementation also returns the MFA Delete status of the versioning state. If the MFA Delete status is enabled, the bucket owner must use an authentication device to change the versioning state of the bucket.
The following operations are related to GetBucketVersioning:
\n ", "smithy.api#examples": [ { "title": "To get bucket versioning configuration", @@ -20793,6 +22788,11 @@ "method": "GET", "uri": "/{Bucket}?versioning", "code": 200 + }, + "smithy.rules#staticContextParams": { + "UseS3ExpressControlEndpoint": { + "value": true + } } } }, @@ -20835,7 +22835,7 @@ "ExpectedBucketOwner": { "target": "com.amazonaws.s3#AccountId", "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

", + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } } @@ -20853,7 +22853,7 @@ "target": "com.amazonaws.s3#GetBucketWebsiteOutput" }, "traits": { - "smithy.api#documentation": "

Returns the website configuration for a bucket. To host website on Amazon S3, you can\n configure a bucket as website by adding a website configuration. For more information about\n hosting websites, see Hosting Websites on Amazon S3.

\n

This GET action requires the S3:GetBucketWebsite permission. By default,\n only the bucket owner can read the bucket website configuration. However, bucket owners can\n allow other users to read the website configuration by writing a bucket policy granting\n them the S3:GetBucketWebsite permission.

\n

The following operations are related to GetBucketWebsite:

\n ", + "smithy.api#documentation": "\n

This operation is not supported by directory buckets.
Returns the website configuration for a bucket. To host a website on Amazon S3, you can configure a bucket as a website by adding a website configuration. For more information about hosting websites, see Hosting Websites on Amazon S3.
This GET action requires the S3:GetBucketWebsite permission. By default, only the bucket owner can read the bucket website configuration. However, bucket owners can allow other users to read the website configuration by writing a bucket policy granting them the S3:GetBucketWebsite permission.
The following operations are related to GetBucketWebsite:
\n ", "smithy.api#examples": [ { "title": "To get bucket website configuration", @@ -20875,6 +22875,11 @@ "method": "GET", "uri": "/{Bucket}?website", "code": 200 + }, + "smithy.rules#staticContextParams": { + "UseS3ExpressControlEndpoint": { + "value": true + } } } }, @@ -20928,7 +22933,7 @@ "ExpectedBucketOwner": { "target": "com.amazonaws.s3#AccountId", "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

", + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } } @@ -20963,7 +22968,7 @@ "SHA1" ] }, - "smithy.api#documentation": "

Retrieves objects from Amazon S3. To use GET, you must have READ\n access to the object. If you grant READ access to the anonymous user, you can\n return the object without using an authorization header.

\n

An Amazon S3 bucket has no directory hierarchy such as you would find in a typical computer\n file system. You can, however, create a logical hierarchy by using object key names that\n imply a folder structure. For example, instead of naming an object sample.jpg,\n you can name it photos/2006/February/sample.jpg.

\n

To get an object from such a logical hierarchy, specify the full key name for the object\n in the GET operation. For a virtual hosted-style request example, if you have\n the object photos/2006/February/sample.jpg, specify the resource as\n /photos/2006/February/sample.jpg. For a path-style request example, if you\n have the object photos/2006/February/sample.jpg in the bucket named\n examplebucket, specify the resource as\n /examplebucket/photos/2006/February/sample.jpg. For more information about\n request types, see HTTP Host\n Header Bucket Specification.

\n

For more information about returning the ACL of an object, see GetObjectAcl.

\n

If the object you are retrieving is stored in the S3 Glacier Flexible Retrieval or\n S3 Glacier Deep Archive storage class, or S3 Intelligent-Tiering Archive or\n S3 Intelligent-Tiering Deep Archive tiers, before you can retrieve the object you must first restore a\n copy using RestoreObject. Otherwise, this action returns an\n InvalidObjectState error. For information about restoring archived objects,\n see Restoring\n Archived Objects.

\n

Encryption request headers, like x-amz-server-side-encryption, should not\n be sent for GET requests if your object uses server-side encryption with Key Management Service (KMS)\n keys (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), or\n server-side encryption with Amazon S3 managed encryption keys (SSE-S3). If your object does use\n these types of keys, you’ll get an HTTP 400 Bad Request error.

\n

If you encrypt an object by using server-side encryption with customer-provided\n encryption keys (SSE-C) when you store the object in Amazon S3, then when you GET the object,\n you must use the following headers:

\n
    \n
  • \n

    \n x-amz-server-side-encryption-customer-algorithm\n

    \n
  • \n
  • \n

    \n x-amz-server-side-encryption-customer-key\n

    \n
  • \n
  • \n

    \n x-amz-server-side-encryption-customer-key-MD5\n

    \n
  • \n
\n

For more information about SSE-C, see Server-Side Encryption\n (Using Customer-Provided Encryption Keys).

\n

Assuming you have the relevant permission to read object tags, the response also returns\n the x-amz-tagging-count header that provides the count of number of tags\n associated with the object. You can use GetObjectTagging to retrieve\n the tag set associated with an object.

\n
\n
Permissions
\n
\n

You need the relevant read object (or version) permission for this operation.\n For more information, see Specifying Permissions in\n a Policy. If the object that you request doesn’t exist, the error that\n Amazon S3 returns depends on whether you also have the s3:ListBucket\n permission.

\n

If you have the s3:ListBucket permission on the bucket, Amazon S3\n returns an HTTP status code 404 (Not Found) error.

\n

If you don’t have the s3:ListBucket permission, Amazon S3 returns an\n HTTP status code 403 (\"access denied\") error.

\n
\n
Versioning
\n
\n

By default, the GET action returns the current version of an\n object. To return a different version, use the versionId\n subresource.

\n \n
    \n
  • \n

    If you supply a versionId, you need the\n s3:GetObjectVersion permission to access a specific\n version of an object. If you request a specific version, you do not need\n to have the s3:GetObject permission. If you request the\n current version without a specific version ID, only\n s3:GetObject permission is required.\n s3:GetObjectVersion permission won't be required.

    \n
  • \n
  • \n

    If the current version of the object is a delete marker, Amazon S3 behaves as if the object was deleted and includes x-amz-delete-marker: true in the response.

    \n
  • \n
  • \n

    If the specified version is a delete marker, the response returns a 405 (Method Not Allowed) error and the Last-Modified: timestamp response header.

    \n
  • \n
\n
\n

For more information about versioning, see PutBucketVersioning.

\n
\n
Overriding Response Header Values
\n
\n

There are times when you want to override certain response header values in a\n GET response. For example, you might override the\n Content-Disposition response header value in your GET\n request.

\n

You can override values for a set of response headers using the following query\n parameters. These response header values are sent only on a successful request,\n that is, when status code 200 OK is returned. The set of headers you can override\n using these parameters is a subset of the headers that Amazon S3 accepts when you\n create an object. The response headers that you can override for the\n GET response are Content-Type,\n Content-Language, Expires,\n Cache-Control, Content-Disposition, and\n Content-Encoding. To override these header values in the\n GET response, you use the following request parameters.

\n \n

You must sign the request, either using an Authorization header or a\n presigned URL, when using these parameters. They cannot be used with an\n unsigned (anonymous) request.

\n
\n
    \n
  • \n

    \n response-content-type\n

    \n
  • \n
  • \n

    \n response-content-language\n

    \n
  • \n
  • \n

    \n response-expires\n

    \n
  • \n
  • \n

    \n response-cache-control\n

    \n
  • \n
  • \n

    \n response-content-disposition\n

    \n
  • \n
  • \n

    \n response-content-encoding\n

    \n
  • \n
\n
\n
Overriding Response Header Values
\n
\n

If both of the If-Match and If-Unmodified-Since\n headers are present in the request as follows: If-Match condition\n evaluates to true, and; If-Unmodified-Since condition\n evaluates to false; then, S3 returns 200 OK and the data requested.

\n

If both of the If-None-Match and If-Modified-Since\n headers are present in the request as follows: If-None-Match\n condition evaluates to false, and; If-Modified-Since\n condition evaluates to true; then, S3 returns 304 Not Modified\n response code.

\n

For more information about conditional requests, see RFC 7232.

\n
\n
\n

The following operations are related to GetObject:

\n ", + "smithy.api#documentation": "

Retrieves an object from Amazon S3.
In the GetObject request, specify the full key name for the object.
General purpose buckets - Both the virtual-hosted-style requests and the path-style requests are supported. For a virtual hosted-style request example, if you have the object photos/2006/February/sample.jpg, specify the object key name as /photos/2006/February/sample.jpg. For a path-style request example, if you have the object photos/2006/February/sample.jpg in the bucket named examplebucket, specify the object key name as /examplebucket/photos/2006/February/sample.jpg. For more information about request types, see HTTP Host Header Bucket Specification in the Amazon S3 User Guide.
Directory buckets - Only virtual-hosted-style requests are supported. For a virtual hosted-style request example, if you have the object photos/2006/February/sample.jpg in the bucket named examplebucket--use1-az5--x-s3, specify the object key name as /photos/2006/February/sample.jpg. Also, when you make requests to this API operation, your requests are sent to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.
Permissions
• General purpose bucket permissions - You must have the required permissions in a policy. To use GetObject, you must have the READ access to the object (or version). If you grant READ access to the anonymous user, the GetObject operation returns the object without using an authorization header. For more information, see Specifying permissions in a policy in the Amazon S3 User Guide.
  If you include a versionId in your request header, you must have the s3:GetObjectVersion permission to access a specific version of an object. The s3:GetObject permission is not required in this scenario.
  If you request the current version of an object without a specific versionId in the request header, only the s3:GetObject permission is required. The s3:GetObjectVersion permission is not required in this scenario.
  If the object that you request doesn’t exist, the error that Amazon S3 returns depends on whether you also have the s3:ListBucket permission.
  • If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an HTTP status code 404 Not Found error.
  • If you don’t have the s3:ListBucket permission, Amazon S3 returns an HTTP status code 403 Access Denied error.
• Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession.
Storage classes
If the object you are retrieving is stored in the S3 Glacier Flexible Retrieval storage class, the S3 Glacier Deep Archive storage class, the S3 Intelligent-Tiering Archive Access tier, or the S3 Intelligent-Tiering Deep Archive Access tier, before you can retrieve the object you must first restore a copy using RestoreObject. Otherwise, this operation returns an InvalidObjectState error. For information about restoring archived objects, see Restoring Archived Objects in the Amazon S3 User Guide.
Directory buckets - For directory buckets, only the S3 Express One Zone storage class is supported to store newly created objects. Unsupported storage class values won't write a destination object and will respond with the HTTP status code 400 Bad Request.
Encryption
Encryption request headers, like x-amz-server-side-encryption, should not be sent for the GetObject requests, if your object uses server-side encryption with Amazon S3 managed encryption keys (SSE-S3), server-side encryption with Key Management Service (KMS) keys (SSE-KMS), or dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS). If you include the header in your GetObject requests for the object that uses these types of keys, you’ll get an HTTP 400 Bad Request error.
Overriding response header values through the request
There are times when you want to override certain response header values of a GetObject response. For example, you might override the Content-Disposition response header value through your GetObject request.
You can override values for a set of response headers. These modified response header values are included only in a successful response, that is, when the HTTP status code 200 OK is returned. The headers you can override using the following query parameters in the request are a subset of the headers that Amazon S3 accepts when you create an object.
The response headers that you can override for the GetObject response are Cache-Control, Content-Disposition, Content-Encoding, Content-Language, Content-Type, and Expires.
To override values for a set of response headers in the GetObject response, you can use the following query parameters in the request.
• response-cache-control
• response-content-disposition
• response-content-encoding
• response-content-language
• response-content-type
• response-expires
When you use these parameters, you must sign the request by using either an Authorization header or a presigned URL. These parameters cannot be used with an unsigned (anonymous) request.
HTTP Host header syntax
Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com.
The following operations are related to GetObject:
\n ", "smithy.api#http": { "method": "GET", "uri": "/{Bucket}/{Key+}?x-id=GetObject", @@ -20985,7 +22990,7 @@ } ], "traits": { - "smithy.api#documentation": "

Returns the access control list (ACL) of an object. To use this operation, you must have\n s3:GetObjectAcl permissions or READ_ACP access to the object.\n For more information, see Mapping of ACL permissions and access policy permissions in the Amazon S3\n User Guide\n

\n

This action is not supported by Amazon S3 on Outposts.

\n

By default, GET returns ACL information about the current version of an object. To\n return ACL information about a different version, use the versionId subresource.

\n \n

If your bucket uses the bucket owner enforced setting for S3 Object Ownership,\n requests to read ACLs are still supported and return the\n bucket-owner-full-control ACL with the owner being the account that\n created the bucket. For more information, see Controlling object\n ownership and disabling ACLs in the\n Amazon S3 User Guide.

\n
\n

The following operations are related to GetObjectAcl:

\n ", + "smithy.api#documentation": "\n

This operation is not supported by directory buckets.
Returns the access control list (ACL) of an object. To use this operation, you must have s3:GetObjectAcl permissions or READ_ACP access to the object. For more information, see Mapping of ACL permissions and access policy permissions in the Amazon S3 User Guide.
This functionality is not supported for Amazon S3 on Outposts.
By default, GET returns ACL information about the current version of an object. To return ACL information about a different version, use the versionId subresource.
If your bucket uses the bucket owner enforced setting for S3 Object Ownership, requests to read ACLs are still supported and return the bucket-owner-full-control ACL with the owner being the account that created the bucket. For more information, see Controlling object ownership and disabling ACLs in the Amazon S3 User Guide.
The following operations are related to GetObjectAcl:
\n ", "smithy.api#examples": [ { "title": "To retrieve object ACL", @@ -21077,7 +23082,7 @@ "Bucket": { "target": "com.amazonaws.s3#BucketName", "traits": { - "smithy.api#documentation": "

The bucket name that contains the object for which to get the ACL information.

\n

When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.

", + "smithy.api#documentation": "

The bucket name that contains the object for which to get the ACL information.

\n

\n Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.

", "smithy.api#httpLabel": {}, "smithy.api#required": {}, "smithy.rules#contextParam": { @@ -21099,7 +23104,7 @@ "VersionId": { "target": "com.amazonaws.s3#ObjectVersionId", "traits": { - "smithy.api#documentation": "

VersionId used to reference a specific version of the object.

", + "smithy.api#documentation": "

Version ID used to reference a specific version of the object.

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpQuery": "versionId" } }, @@ -21112,7 +23117,7 @@ "ExpectedBucketOwner": { "target": "com.amazonaws.s3#AccountId", "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

", + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } } @@ -21135,7 +23140,7 @@ } ], "traits": { - "smithy.api#documentation": "

Retrieves all the metadata from an object without returning the object itself. This\n action is useful if you're interested only in an object's metadata. To use\n GetObjectAttributes, you must have READ access to the object.

\n

\n GetObjectAttributes combines the functionality of HeadObject\n and ListParts. All of the data returned with each of those individual calls\n can be returned with a single call to GetObjectAttributes.

\n

If you encrypt an object by using server-side encryption with customer-provided\n encryption keys (SSE-C) when you store the object in Amazon S3, then when you retrieve the\n metadata from the object, you must use the following headers:

\n
    \n
  • \n

    \n x-amz-server-side-encryption-customer-algorithm\n

    \n
  • \n
  • \n

    \n x-amz-server-side-encryption-customer-key\n

    \n
  • \n
  • \n

    \n x-amz-server-side-encryption-customer-key-MD5\n

    \n
  • \n
\n

For more information about SSE-C, see Server-Side Encryption\n (Using Customer-Provided Encryption Keys) in the\n Amazon S3 User Guide.

\n \n
    \n
  • \n

    Encryption request headers, such as x-amz-server-side-encryption,\n should not be sent for GET requests if your object uses server-side encryption\n with Amazon Web Services KMS keys stored in Amazon Web Services Key Management Service (SSE-KMS) or\n server-side encryption with Amazon S3 managed keys (SSE-S3). If your object does use\n these types of keys, you'll get an HTTP 400 Bad Request error.

    \n
  • \n
  • \n

    The last modified property in this case is the creation date of the\n object.

    \n
  • \n
\n
\n

Consider the following when using request headers:

\n
    \n
  • \n

    If both of the If-Match and If-Unmodified-Since headers\n are present in the request as follows, then Amazon S3 returns the HTTP status code\n 200 OK and the data requested:

    \n
      \n
    • \n

      \n If-Match condition evaluates to true.

      \n
    • \n
    • \n

      \n If-Unmodified-Since condition evaluates to\n false.

      \n
    • \n
    \n
  • \n
  • \n

    If both of the If-None-Match and If-Modified-Since\n headers are present in the request as follows, then Amazon S3 returns the HTTP status code\n 304 Not Modified:

    \n
      \n
    • \n

      \n If-None-Match condition evaluates to false.

      \n
    • \n
    • \n

      \n If-Modified-Since condition evaluates to\n true.

      \n
    • \n
    \n
  • \n
\n

For more information about conditional requests, see RFC 7232.

\n
\n
Permissions
\n
\n

The permissions that you need to use this operation depend on whether the\n bucket is versioned. If the bucket is versioned, you need both the\n s3:GetObjectVersion and s3:GetObjectVersionAttributes\n permissions for this operation. If the bucket is not versioned, you need the\n s3:GetObject and s3:GetObjectAttributes permissions.\n For more information, see Specifying Permissions in\n a Policy in the Amazon S3 User Guide. If the object\n that you request does not exist, the error Amazon S3 returns depends on whether you\n also have the s3:ListBucket permission.

\n
    \n
  • \n

    If you have the s3:ListBucket permission on the bucket, Amazon S3\n returns an HTTP status code 404 Not Found (\"no such key\")\n error.

    \n
  • \n
  • \n

    If you don't have the s3:ListBucket permission, Amazon S3 returns\n an HTTP status code 403 Forbidden (\"access denied\")\n error.

    \n
  • \n
\n
\n
\n

The following actions are related to GetObjectAttributes:

\n ", + "smithy.api#documentation": "

Retrieves all the metadata from an object without returning the object itself. This operation is useful if you're interested only in an object's metadata.
GetObjectAttributes combines the functionality of HeadObject and ListParts. All of the data returned with each of those individual calls can be returned with a single call to GetObjectAttributes.
Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.
Permissions
• General purpose bucket permissions - To use GetObjectAttributes, you must have READ access to the object. The permissions that you need to use this operation with depend on whether the bucket is versioned. If the bucket is versioned, you need both the s3:GetObjectVersion and s3:GetObjectVersionAttributes permissions for this operation. If the bucket is not versioned, you need the s3:GetObject and s3:GetObjectAttributes permissions. For more information, see Specifying Permissions in a Policy in the Amazon S3 User Guide. If the object that you request does not exist, the error Amazon S3 returns depends on whether you also have the s3:ListBucket permission.
  • If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an HTTP status code 404 Not Found (\"no such key\") error.
  • If you don't have the s3:ListBucket permission, Amazon S3 returns an HTTP status code 403 Forbidden (\"access denied\") error.
• Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession.
Encryption
Encryption request headers, like x-amz-server-side-encryption, should not be sent for HEAD requests if your object uses server-side encryption with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with Amazon S3 managed encryption keys (SSE-S3). The x-amz-server-side-encryption header is used when you PUT an object to S3 and want to specify the encryption method. If you include this header in a GET request for an object that uses these types of keys, you’ll get an HTTP 400 Bad Request error. It's because the encryption method can't be changed when you retrieve the object.
If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the object in Amazon S3, then when you retrieve the metadata from the object, you must use the following headers to provide the encryption key for the server to be able to retrieve the object's metadata. The headers are:
• x-amz-server-side-encryption-customer-algorithm
• x-amz-server-side-encryption-customer-key
• x-amz-server-side-encryption-customer-key-MD5
For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys) in the Amazon S3 User Guide.
Directory bucket permissions - For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) is supported.
Versioning
Directory buckets - S3 Versioning isn't enabled and supported for directory buckets. For this API operation, only the null value of the version ID is supported by directory buckets. You can only specify null to the versionId query parameter in the request.
Conditional request headers
Consider the following when using request headers:
• If both of the If-Match and If-Unmodified-Since headers are present in the request as follows, then Amazon S3 returns the HTTP status code 200 OK and the data requested:
  • If-Match condition evaluates to true.
  • If-Unmodified-Since condition evaluates to false.
  For more information about conditional requests, see RFC 7232.
• If both of the If-None-Match and If-Modified-Since headers are present in the request as follows, then Amazon S3 returns the HTTP status code 304 Not Modified:
  • If-None-Match condition evaluates to false.
  • If-Modified-Since condition evaluates to true.
  For more information about conditional requests, see RFC 7232.
HTTP Host header syntax
Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com.
The following actions are related to GetObjectAttributes:
\n ", "smithy.api#http": { "method": "GET", "uri": "/{Bucket}/{Key+}?attributes", @@ -21149,7 +23154,7 @@ "DeleteMarker": { "target": "com.amazonaws.s3#DeleteMarker", "traits": { - "smithy.api#documentation": "

Specifies whether the object retrieved was (true) or was not\n (false) a delete marker. If false, this response header does\n not appear in the response.

", + "smithy.api#documentation": "

Specifies whether the object retrieved was (true) or was not\n (false) a delete marker. If false, this response header does\n not appear in the response.

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-delete-marker" } }, @@ -21163,7 +23168,7 @@ "VersionId": { "target": "com.amazonaws.s3#ObjectVersionId", "traits": { - "smithy.api#documentation": "

The version ID of the object.

", + "smithy.api#documentation": "

The version ID of the object.

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-version-id" } }, @@ -21194,7 +23199,7 @@ "StorageClass": { "target": "com.amazonaws.s3#StorageClass", "traits": { - "smithy.api#documentation": "

Provides the storage class information of the object. Amazon S3 returns this header for all\n objects except for S3 Standard storage class objects.

\n

For more information, see Storage Classes.

" + "smithy.api#documentation": "

Provides the storage class information of the object. Amazon S3 returns this header for all\n objects except for S3 Standard storage class objects.

\n

For more information, see Storage Classes.

\n \n

\n Directory buckets - Only the S3 Express One Zone storage class is supported by directory buckets to store objects.

\n
" } }, "ObjectSize": { @@ -21245,7 +23250,7 @@ "Parts": { "target": "com.amazonaws.s3#PartsList", "traits": { - "smithy.api#documentation": "

A container for elements related to a particular part. A response can contain zero or\n more Parts elements.

", + "smithy.api#documentation": "

A container for elements related to a particular part. A response can contain zero or more Parts elements.
• General purpose buckets - For GetObjectAttributes, if an additional checksum (including x-amz-checksum-crc32, x-amz-checksum-crc32c, x-amz-checksum-sha1, or x-amz-checksum-sha256) isn't applied to the object specified in the request, the response doesn't return Part.
• Directory buckets - For GetObjectAttributes, no matter whether an additional checksum is applied to the object specified in the request, the response returns Part.
", "smithy.api#xmlFlattened": {}, "smithy.api#xmlName": "Part" } @@ -21261,7 +23266,7 @@ "Bucket": { "target": "com.amazonaws.s3#BucketName", "traits": { - "smithy.api#documentation": "

The name of the bucket that contains the object.

\n

When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.

\n

When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form \n AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide.

", + "smithy.api#documentation": "

The name of the bucket that contains the object.

\n

\n Directory buckets - When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format \n Bucket_name.s3express-az_id.region.amazonaws.com. Path-style requests are not supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must follow the format \n bucket_base_name--az-id--x-s3 (for example, \n DOC-EXAMPLE-BUCKET--usw2-az2--x-s3). For information about bucket naming\n restrictions, see Directory bucket naming\n rules in the Amazon S3 User Guide.

\n

\n Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.

\n \n

Access points and Object Lambda access points are not supported by directory buckets.

\n
\n

\n S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form \n AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide.

", "smithy.api#httpLabel": {}, "smithy.api#required": {}, "smithy.rules#contextParam": { @@ -21280,7 +23285,7 @@ "VersionId": { "target": "com.amazonaws.s3#ObjectVersionId", "traits": { - "smithy.api#documentation": "

The version ID used to reference a specific version of the object.

", + "smithy.api#documentation": "

The version ID used to reference a specific version of the object.

\n \n

S3 Versioning isn't enabled and supported for directory buckets. For this API operation, only the null value of the version ID is supported by directory buckets. You can only specify null \n to the versionId query parameter in the request.

\n
", "smithy.api#httpQuery": "versionId" } }, @@ -21301,21 +23306,21 @@ "SSECustomerAlgorithm": { "target": "com.amazonaws.s3#SSECustomerAlgorithm", "traits": { - "smithy.api#documentation": "

Specifies the algorithm to use when encrypting the object (for example, AES256).

", + "smithy.api#documentation": "

Specifies the algorithm to use when encrypting the object (for example, AES256).

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-server-side-encryption-customer-algorithm" } }, "SSECustomerKey": { "target": "com.amazonaws.s3#SSECustomerKey", "traits": { - "smithy.api#documentation": "

Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This\n value is used to store the object and then it is discarded; Amazon S3 does not store the\n encryption key. The key must be appropriate for use with the algorithm specified in the\n x-amz-server-side-encryption-customer-algorithm header.

", + "smithy.api#documentation": "

Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This\n value is used to store the object and then it is discarded; Amazon S3 does not store the\n encryption key. The key must be appropriate for use with the algorithm specified in the\n x-amz-server-side-encryption-customer-algorithm header.

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-server-side-encryption-customer-key" } }, "SSECustomerKeyMD5": { "target": "com.amazonaws.s3#SSECustomerKeyMD5", "traits": { - "smithy.api#documentation": "

Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses\n this header for a message integrity check to ensure that the encryption key was transmitted\n without error.

", + "smithy.api#documentation": "

Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses\n this header for a message integrity check to ensure that the encryption key was transmitted\n without error.

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-server-side-encryption-customer-key-MD5" } }, @@ -21328,7 +23333,7 @@ "ExpectedBucketOwner": { "target": "com.amazonaws.s3#AccountId", "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

", + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } }, @@ -21354,7 +23359,7 @@ "target": "com.amazonaws.s3#GetObjectLegalHoldOutput" }, "traits": { - "smithy.api#documentation": "

Gets an object's current legal hold status. For more information, see Locking\n Objects.

\n

This action is not supported by Amazon S3 on Outposts.

\n

The following action is related to GetObjectLegalHold:

\n ", + "smithy.api#documentation": "\n

This operation is not supported by directory buckets.
Gets an object's current legal hold status. For more information, see Locking Objects.
This functionality is not supported for Amazon S3 on Outposts.
The following action is related to GetObjectLegalHold:
\n ", "smithy.api#http": { "method": "GET", "uri": "/{Bucket}/{Key+}?legal-hold", @@ -21383,7 +23388,7 @@ "Bucket": { "target": "com.amazonaws.s3#BucketName", "traits": { - "smithy.api#documentation": "

The bucket name containing the object whose legal hold status you want to retrieve.

\n

When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.

", + "smithy.api#documentation": "

The bucket name containing the object whose legal hold status you want to retrieve.

\n

\n Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.

", "smithy.api#httpLabel": {}, "smithy.api#required": {}, "smithy.rules#contextParam": { @@ -21415,7 +23420,7 @@ "ExpectedBucketOwner": { "target": "com.amazonaws.s3#AccountId", "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).",
+          "smithy.api#documentation": "The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).",
          "smithy.api#httpHeader": "x-amz-expected-bucket-owner"
        }
      }
@@ -21433,7 +23438,7 @@
        "target": "com.amazonaws.s3#GetObjectLockConfigurationOutput"
      },
      "traits": {

-          "smithy.api#documentation": "Gets the Object Lock configuration for a bucket. The rule specified in the Object Lock configuration will be applied by default to every new object placed in the specified bucket. For more information, see Locking Objects. The following action is related to GetObjectLockConfiguration:",
+          "smithy.api#documentation": "This operation is not supported by directory buckets. Gets the Object Lock configuration for a bucket. The rule specified in the Object Lock configuration will be applied by default to every new object placed in the specified bucket. For more information, see Locking Objects. The following action is related to GetObjectLockConfiguration:",
          "smithy.api#http": {
            "method": "GET",
            "uri": "/{Bucket}?object-lock",
@@ -21462,7 +23467,7 @@
      "Bucket": {
        "target": "com.amazonaws.s3#BucketName",
        "traits": {
-          "smithy.api#documentation": "

The bucket whose Object Lock configuration you want to retrieve.

\n

When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.

", + "smithy.api#documentation": "

The bucket whose Object Lock configuration you want to retrieve.

\n

\n Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.

", "smithy.api#httpLabel": {}, "smithy.api#required": {}, "smithy.rules#contextParam": { @@ -21473,7 +23478,7 @@ "ExpectedBucketOwner": { "target": "com.amazonaws.s3#AccountId", "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).",
+          "smithy.api#documentation": "The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).",
          "smithy.api#httpHeader": "x-amz-expected-bucket-owner"
        }
      }
@@ -21496,35 +23501,35 @@
      "DeleteMarker": {
        "target": "com.amazonaws.s3#DeleteMarker",
        "traits": {

-          "smithy.api#documentation": "Specifies whether the object retrieved was (true) or was not (false) a Delete Marker. If false, this response header does not appear in the response.",
+          "smithy.api#documentation": "Indicates whether the object retrieved was (true) or was not (false) a Delete Marker. If false, this response header does not appear in the response. If the current version of the object is a delete marker, Amazon S3 behaves as if the object was deleted and includes x-amz-delete-marker: true in the response. If the specified version in the request is a delete marker, the response returns a 405 Method Not Allowed error and the Last-Modified: timestamp response header.",
          "smithy.api#httpHeader": "x-amz-delete-marker"
        }
      },
      "AcceptRanges": {
        "target": "com.amazonaws.s3#AcceptRanges",
        "traits": {

-          "smithy.api#documentation": "Indicates that a range of bytes was specified.",
+          "smithy.api#documentation": "Indicates that a range of bytes was specified in the request.",
          "smithy.api#httpHeader": "accept-ranges"
        }
      },
      "Expiration": {
        "target": "com.amazonaws.s3#Expiration",
        "traits": {

-          "smithy.api#documentation": "If the object expiration is configured (see PUT Bucket lifecycle), the response includes this header. It includes the expiry-date and rule-id key-value pairs providing object expiration information. The value of the rule-id is URL-encoded.",
+          "smithy.api#documentation": "If the object expiration is configured (see PutBucketLifecycleConfiguration), the response includes this header. It includes the expiry-date and rule-id key-value pairs providing object expiration information. The value of the rule-id is URL-encoded. This functionality is not supported for directory buckets.",
          "smithy.api#httpHeader": "x-amz-expiration"
        }
      },
      "Restore": {
        "target": "com.amazonaws.s3#Restore",
        "traits": {

-          "smithy.api#documentation": "Provides information about object restoration action and expiration time of the restored object copy.",
+          "smithy.api#documentation": "Provides information about object restoration action and expiration time of the restored object copy. This functionality is not supported for directory buckets. Only the S3 Express One Zone storage class is supported by directory buckets to store objects.",
          "smithy.api#httpHeader": "x-amz-restore"
        }
      },
      "LastModified": {
        "target": "com.amazonaws.s3#LastModified",
        "traits": {

-          "smithy.api#documentation": "Date and time when the object was last modified.",
+          "smithy.api#documentation": "Date and time when the object was last modified. General purpose buckets - When you specify a versionId of the object in your request, if the specified version in the request is a delete marker, the response returns a 405 Method Not Allowed error and the Last-Modified: timestamp response header.",
          "smithy.api#httpHeader": "Last-Modified"
        }
      },
@@ -21545,42 +23550,42 @@
      "ChecksumCRC32": {
        "target": "com.amazonaws.s3#ChecksumCRC32",
        "traits": {

-          "smithy.api#documentation": "The base64-encoded, 32-bit CRC32 checksum of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.",
+          "smithy.api#documentation": "The base64-encoded, 32-bit CRC32 checksum of the object. This will only be present if it was uploaded with the object. For more information, see Checking object integrity in the Amazon S3 User Guide.",
          "smithy.api#httpHeader": "x-amz-checksum-crc32"
        }
      },
      "ChecksumCRC32C": {
        "target": "com.amazonaws.s3#ChecksumCRC32C",
        "traits": {
-          "smithy.api#documentation": "The base64-encoded, 32-bit CRC32C checksum of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.",
+          "smithy.api#documentation": "The base64-encoded, 32-bit CRC32C checksum of the object. This will only be present if it was uploaded with the object. For more information, see Checking object integrity in the Amazon S3 User Guide.",
          "smithy.api#httpHeader": "x-amz-checksum-crc32c"
        }
      },
      "ChecksumSHA1": {
        "target": "com.amazonaws.s3#ChecksumSHA1",
        "traits": {
-          "smithy.api#documentation": "The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.",
+          "smithy.api#documentation": "The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded with the object. For more information, see Checking object integrity in the Amazon S3 User Guide.",
          "smithy.api#httpHeader": "x-amz-checksum-sha1"
        }
      },
      "ChecksumSHA256": {
        "target": "com.amazonaws.s3#ChecksumSHA256",
        "traits": {
-          "smithy.api#documentation": "The base64-encoded, 256-bit SHA-256 digest of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.",
+          "smithy.api#documentation": "The base64-encoded, 256-bit SHA-256 digest of the object. This will only be present if it was uploaded with the object. For more information, see Checking object integrity in the Amazon S3 User Guide.",
          "smithy.api#httpHeader": "x-amz-checksum-sha256"
        }
      },

      "MissingMeta": {
        "target": "com.amazonaws.s3#MissingMeta",
        "traits": {
-          "smithy.api#documentation": "This is set to the number of metadata entries not returned in x-amz-meta headers. This can happen if you create metadata using an API like SOAP that supports more flexible metadata than the REST API. For example, using SOAP, you can create metadata whose values are not legal HTTP headers.",
+          "smithy.api#documentation": "This is set to the number of metadata entries not returned in the headers that are prefixed with x-amz-meta-. This can happen if you create metadata using an API like SOAP that supports more flexible metadata than the REST API. For example, using SOAP, you can create metadata whose values are not legal HTTP headers. This functionality is not supported for directory buckets.",
          "smithy.api#httpHeader": "x-amz-missing-meta"
        }
      },
      "VersionId": {
        "target": "com.amazonaws.s3#ObjectVersionId",
        "traits": {
-          "smithy.api#documentation": "Version of the object.",
+          "smithy.api#documentation": "Version ID of the object. This functionality is not supported for directory buckets.",
          "smithy.api#httpHeader": "x-amz-version-id"
        }
      },
@@ -21601,7 +23606,7 @@
      "ContentEncoding": {
        "target": "com.amazonaws.s3#ContentEncoding",
        "traits": {

-          "smithy.api#documentation": "Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field.",
+          "smithy.api#documentation": "Indicates what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field.",
          "smithy.api#httpHeader": "Content-Encoding"
        }
      },
@@ -21636,14 +23641,14 @@
      "WebsiteRedirectLocation": {
        "target": "com.amazonaws.s3#WebsiteRedirectLocation",
        "traits": {
-          "smithy.api#documentation": "If the bucket is configured as a website, redirects requests for this object to another object in the same bucket or to an external URL. Amazon S3 stores the value of this header in the object metadata.",
+          "smithy.api#documentation": "If the bucket is configured as a website, redirects requests for this object to another object in the same bucket or to an external URL. Amazon S3 stores the value of this header in the object metadata. This functionality is not supported for directory buckets.",
          "smithy.api#httpHeader": "x-amz-website-redirect-location"
        }
      },
      "ServerSideEncryption": {
        "target": "com.amazonaws.s3#ServerSideEncryption",
        "traits": {
-          "smithy.api#documentation": "The server-side encryption algorithm used when storing this object in Amazon S3 (for example, AES256, aws:kms, aws:kms:dsse).",
+          "smithy.api#documentation": "The server-side encryption algorithm used when you store this object in Amazon S3 (for example, AES256, aws:kms, aws:kms:dsse). For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) is supported.",
          "smithy.api#httpHeader": "x-amz-server-side-encryption"
        }
      },
@@ -21657,35 +23662,35 @@
      "SSECustomerAlgorithm": {
        "target": "com.amazonaws.s3#SSECustomerAlgorithm",
        "traits": {

-          "smithy.api#documentation": "If server-side encryption with a customer-provided encryption key was requested, the response will include this header confirming the encryption algorithm used.",
+          "smithy.api#documentation": "If server-side encryption with a customer-provided encryption key was requested, the response will include this header to confirm the encryption algorithm that's used. This functionality is not supported for directory buckets.",
          "smithy.api#httpHeader": "x-amz-server-side-encryption-customer-algorithm"
        }
      },
      "SSECustomerKeyMD5": {
        "target": "com.amazonaws.s3#SSECustomerKeyMD5",
        "traits": {
-          "smithy.api#documentation": "If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide round-trip message integrity verification of the customer-provided encryption key.",
+          "smithy.api#documentation": "If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide the round-trip message integrity verification of the customer-provided encryption key. This functionality is not supported for directory buckets.",
          "smithy.api#httpHeader": "x-amz-server-side-encryption-customer-key-MD5"
        }
      },
      "SSEKMSKeyId": {
        "target": "com.amazonaws.s3#SSEKMSKeyId",
        "traits": {
-          "smithy.api#documentation": "If present, specifies the ID of the Key Management Service (KMS) symmetric encryption customer managed key that was used for the object.",
+          "smithy.api#documentation": "If present, indicates the ID of the Key Management Service (KMS) symmetric encryption customer managed key that was used for the object. This functionality is not supported for directory buckets.",
          "smithy.api#httpHeader": "x-amz-server-side-encryption-aws-kms-key-id"
        }
      },
      "BucketKeyEnabled": {
        "target": "com.amazonaws.s3#BucketKeyEnabled",
        "traits": {

-          "smithy.api#documentation": "Indicates whether the object uses an S3 Bucket Key for server-side encryption with Key Management Service (KMS) keys (SSE-KMS).",
+          "smithy.api#documentation": "Indicates whether the object uses an S3 Bucket Key for server-side encryption with Key Management Service (KMS) keys (SSE-KMS). This functionality is not supported for directory buckets.",
          "smithy.api#httpHeader": "x-amz-server-side-encryption-bucket-key-enabled"
        }
      },
      "StorageClass": {
        "target": "com.amazonaws.s3#StorageClass",
        "traits": {
-          "smithy.api#documentation": "Provides storage class information of the object. Amazon S3 returns this header for all objects except for S3 Standard storage class objects.",
+          "smithy.api#documentation": "Provides storage class information of the object. Amazon S3 returns this header for all objects except for S3 Standard storage class objects. Directory buckets - Only the S3 Express One Zone storage class is supported by directory buckets to store objects.",
          "smithy.api#httpHeader": "x-amz-storage-class"
        }
      },
@@ -21698,7 +23703,7 @@
      "ReplicationStatus": {
        "target": "com.amazonaws.s3#ReplicationStatus",
        "traits": {
-          "smithy.api#documentation": "Amazon S3 can return this if your request involves a bucket that is either a source or destination in a replication rule.",
+          "smithy.api#documentation": "Amazon S3 can return this if your request involves a bucket that is either a source or destination in a replication rule. This functionality is not supported for directory buckets.",
          "smithy.api#httpHeader": "x-amz-replication-status"
        }
      },
@@ -21712,28 +23717,28 @@
      "TagCount": {
        "target": "com.amazonaws.s3#TagCount",
        "traits": {

-          "smithy.api#documentation": "The number of tags, if any, on the object.",
+          "smithy.api#documentation": "The number of tags, if any, on the object, when you have the relevant permission to read object tags. You can use GetObjectTagging to retrieve the tag set associated with an object. This functionality is not supported for directory buckets.",
          "smithy.api#httpHeader": "x-amz-tagging-count"
        }
      },
      "ObjectLockMode": {
        "target": "com.amazonaws.s3#ObjectLockMode",
        "traits": {
-          "smithy.api#documentation": "The Object Lock mode currently in place for this object.",
+          "smithy.api#documentation": "The Object Lock mode that's currently in place for this object. This functionality is not supported for directory buckets.",
          "smithy.api#httpHeader": "x-amz-object-lock-mode"
        }
      },
      "ObjectLockRetainUntilDate": {
        "target": "com.amazonaws.s3#ObjectLockRetainUntilDate",
        "traits": {
-          "smithy.api#documentation": "The date and time when this object's Object Lock will expire.",
+          "smithy.api#documentation": "The date and time when this object's Object Lock will expire. This functionality is not supported for directory buckets.",
          "smithy.api#httpHeader": "x-amz-object-lock-retain-until-date"
        }
      },
      "ObjectLockLegalHoldStatus": {
        "target": "com.amazonaws.s3#ObjectLockLegalHoldStatus",
        "traits": {
-          "smithy.api#documentation": "Indicates whether this object has an active legal hold. This field is only returned if you have permission to view an object's legal hold status.",
+          "smithy.api#documentation": "Indicates whether this object has an active legal hold. This field is only returned if you have permission to view an object's legal hold status. This functionality is not supported for directory buckets.",
          "smithy.api#httpHeader": "x-amz-object-lock-legal-hold"
        }
      }
@@ -21748,7 +23753,7 @@
      "Bucket": {
        "target": "com.amazonaws.s3#BucketName",
        "traits": {
-          "smithy.api#documentation": "

The bucket name containing the object.

\n

When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.

\n

When using an Object Lambda access point the hostname takes the form AccessPointName-AccountId.s3-object-lambda.Region.amazonaws.com.

\n

When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form \n AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide.

", + "smithy.api#documentation": "

The bucket name containing the object.

\n

\n Directory buckets - When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format \n Bucket_name.s3express-az_id.region.amazonaws.com. Path-style requests are not supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must follow the format \n bucket_base_name--az-id--x-s3 (for example, \n DOC-EXAMPLE-BUCKET--usw2-az2--x-s3). For information about bucket naming\n restrictions, see Directory bucket naming\n rules in the Amazon S3 User Guide.

\n

\n Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.

\n

\n Object Lambda access points - When you use this action with an Object Lambda access point, you must direct requests to the Object Lambda access point hostname. The Object Lambda access point hostname takes the form AccessPointName-AccountId.s3-object-lambda.Region.amazonaws.com.

\n \n

Access points and Object Lambda access points are not supported by directory buckets.

\n
\n

\n S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form \n AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide.

", "smithy.api#httpLabel": {}, "smithy.api#required": {}, "smithy.rules#contextParam": { @@ -21759,28 +23764,28 @@ "IfMatch": { "target": "com.amazonaws.s3#IfMatch", "traits": { - "smithy.api#documentation": "

Return the object only if its entity tag (ETag) is the same as the one specified; otherwise, return a 412 (precondition failed) error.",
+          "smithy.api#documentation": "Return the object only if its entity tag (ETag) is the same as the one specified in this header; otherwise, return a 412 Precondition Failed error. If both of the If-Match and If-Unmodified-Since headers are present in the request as follows: If-Match condition evaluates to true, and; If-Unmodified-Since condition evaluates to false; then, S3 returns 200 OK and the data requested. For more information about conditional requests, see RFC 7232.",
          "smithy.api#httpHeader": "If-Match"
        }
      },
      "IfModifiedSince": {
        "target": "com.amazonaws.s3#IfModifiedSince",
        "traits": {
-          "smithy.api#documentation": "Return the object only if it has been modified since the specified time; otherwise, return a 304 (not modified) error.",
+          "smithy.api#documentation": "Return the object only if it has been modified since the specified time; otherwise, return a 304 Not Modified error. If both of the If-None-Match and If-Modified-Since headers are present in the request as follows: If-None-Match condition evaluates to false, and; If-Modified-Since condition evaluates to true; then, S3 returns 304 Not Modified status code. For more information about conditional requests, see RFC 7232.",
          "smithy.api#httpHeader": "If-Modified-Since"
        }
      },
      "IfNoneMatch": {
        "target": "com.amazonaws.s3#IfNoneMatch",
        "traits": {
-          "smithy.api#documentation": "Return the object only if its entity tag (ETag) is different from the one specified; otherwise, return a 304 (not modified) error.",
+          "smithy.api#documentation": "Return the object only if its entity tag (ETag) is different from the one specified in this header; otherwise, return a 304 Not Modified error. If both of the If-None-Match and If-Modified-Since headers are present in the request as follows: If-None-Match condition evaluates to false, and; If-Modified-Since condition evaluates to true; then, S3 returns 304 Not Modified HTTP status code. For more information about conditional requests, see RFC 7232.",
          "smithy.api#httpHeader": "If-None-Match"
        }
      },
      "IfUnmodifiedSince": {
        "target": "com.amazonaws.s3#IfUnmodifiedSince",
        "traits": {
-          "smithy.api#documentation": "Return the object only if it has not been modified since the specified time; otherwise, return a 412 (precondition failed) error.",
+          "smithy.api#documentation": "Return the object only if it has not been modified since the specified time; otherwise, return a 412 Precondition Failed error. If both of the If-Match and If-Unmodified-Since headers are present in the request as follows: If-Match condition evaluates to true, and; If-Unmodified-Since condition evaluates to false; then, S3 returns 200 OK and the data requested. For more information about conditional requests, see RFC 7232.",
          "smithy.api#httpHeader": "If-Unmodified-Since"
        }
      },

@@ -21798,7 +23803,7 @@
      "Range": {
        "target": "com.amazonaws.s3#Range",
        "traits": {
-          "smithy.api#documentation": "Downloads the specified range bytes of an object. For more information about the HTTP Range header, see https://www.rfc-editor.org/rfc/rfc9110.html#name-range. Amazon S3 doesn't support retrieving multiple ranges of data per GET request.",
+          "smithy.api#documentation": "Downloads the specified byte range of an object. For more information about the HTTP Range header, see https://www.rfc-editor.org/rfc/rfc9110.html#name-range. Amazon S3 doesn't support retrieving multiple ranges of data per GET request.",
          "smithy.api#httpHeader": "Range"
        }
      },
@@ -21812,7 +23817,7 @@
      "ResponseContentDisposition": {
        "target": "com.amazonaws.s3#ResponseContentDisposition",
        "traits": {
-          "smithy.api#documentation": "Sets the Content-Disposition header of the response",
+          "smithy.api#documentation": "Sets the Content-Disposition header of the response.",
          "smithy.api#httpQuery": "response-content-disposition"
        }
      },
@@ -21847,28 +23852,28 @@

      "VersionId": {
        "target": "com.amazonaws.s3#ObjectVersionId",
        "traits": {
-          "smithy.api#documentation": "VersionId used to reference a specific version of the object.",
+          "smithy.api#documentation": "Version ID used to reference a specific version of the object. By default, the GetObject operation returns the current version of an object. To return a different version, use the versionId subresource. If you include a versionId in your request header, you must have the s3:GetObjectVersion permission to access a specific version of an object. The s3:GetObject permission is not required in this scenario. If you request the current version of an object without a specific versionId in the request header, only the s3:GetObject permission is required. The s3:GetObjectVersion permission is not required in this scenario. Directory buckets - S3 Versioning isn't enabled and supported for directory buckets. For this API operation, only the null value of the version ID is supported by directory buckets. You can only specify null to the versionId query parameter in the request. For more information about versioning, see PutBucketVersioning.",
          "smithy.api#httpQuery": "versionId"
        }
      },

      "SSECustomerAlgorithm": {
        "target": "com.amazonaws.s3#SSECustomerAlgorithm",
        "traits": {
-          "smithy.api#documentation": "Specifies the algorithm to use to when decrypting the object (for example, AES256).",
+          "smithy.api#documentation": "Specifies the algorithm to use when decrypting the object (for example, AES256). If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the object in Amazon S3, then when you GET the object, you must use the following headers: x-amz-server-side-encryption-customer-algorithm, x-amz-server-side-encryption-customer-key, x-amz-server-side-encryption-customer-key-MD5. For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys) in the Amazon S3 User Guide. This functionality is not supported for directory buckets.",
          "smithy.api#httpHeader": "x-amz-server-side-encryption-customer-algorithm"
        }
      },
      "SSECustomerKey": {
        "target": "com.amazonaws.s3#SSECustomerKey",
        "traits": {
-          "smithy.api#documentation": "Specifies the customer-provided encryption key for Amazon S3 used to encrypt the data. This value is used to decrypt the object when recovering it and must match the one used when storing the data. The key must be appropriate for use with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm header.",
+          "smithy.api#documentation": "Specifies the customer-provided encryption key that you originally provided for Amazon S3 to encrypt the data before storing it. This value is used to decrypt the object when recovering it and must match the one used when storing the data. The key must be appropriate for use with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm header. If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the object in Amazon S3, then when you GET the object, you must use the following headers: x-amz-server-side-encryption-customer-algorithm, x-amz-server-side-encryption-customer-key, x-amz-server-side-encryption-customer-key-MD5. For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys) in the Amazon S3 User Guide. This functionality is not supported for directory buckets.",
          "smithy.api#httpHeader": "x-amz-server-side-encryption-customer-key"
        }
      },
      "SSECustomerKeyMD5": {
        "target": "com.amazonaws.s3#SSECustomerKeyMD5",
        "traits": {
-          "smithy.api#documentation": "Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure that the encryption key was transmitted without error.",
+          "smithy.api#documentation": "Specifies the 128-bit MD5 digest of the customer-provided encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure that the encryption key was transmitted without error. If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the object in Amazon S3, then when you GET the object, you must use the following headers: x-amz-server-side-encryption-customer-algorithm, x-amz-server-side-encryption-customer-key, x-amz-server-side-encryption-customer-key-MD5. For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys) in the Amazon S3 User Guide. This functionality is not supported for directory buckets.",
          "smithy.api#httpHeader": "x-amz-server-side-encryption-customer-key-MD5"
        }
      },

@@ -21888,7 +23893,7 @@
      "ExpectedBucketOwner": {
        "target": "com.amazonaws.s3#AccountId",
        "traits": {
-          "smithy.api#documentation": "The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).",
+          "smithy.api#documentation": "The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).",
          "smithy.api#httpHeader": "x-amz-expected-bucket-owner"
        }
      },
@@ -21916,7 +23921,7 @@
        "target": "com.amazonaws.s3#GetObjectRetentionOutput"
      },
      "traits": {
-          "smithy.api#documentation": "

Retrieves an object's retention settings. For more information, see Locking\n Objects.

\n

This action is not supported by Amazon S3 on Outposts.

\n

The following action is related to GetObjectRetention:

\n ", + "smithy.api#documentation": "\n

This operation is not supported by directory buckets.

\n
\n

Retrieves an object's retention settings. For more information, see Locking\n Objects.

\n

This functionality is not supported for Amazon S3 on Outposts.

\n

The following action is related to GetObjectRetention:

\n ", "smithy.api#http": { "method": "GET", "uri": "/{Bucket}/{Key+}?retention", @@ -21945,7 +23950,7 @@ "Bucket": { "target": "com.amazonaws.s3#BucketName", "traits": { - "smithy.api#documentation": "

The bucket name containing the object whose retention settings you want to retrieve.

\n

When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.

", + "smithy.api#documentation": "

The bucket name containing the object whose retention settings you want to retrieve.

\n

\n Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.

", "smithy.api#httpLabel": {}, "smithy.api#required": {}, "smithy.rules#contextParam": { @@ -21977,7 +23982,7 @@ "ExpectedBucketOwner": { "target": "com.amazonaws.s3#AccountId", "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

", + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } } @@ -21995,7 +24000,7 @@ "target": "com.amazonaws.s3#GetObjectTaggingOutput" }, "traits": { - "smithy.api#documentation": "

Returns the tag-set of an object. You send the GET request against the tagging\n subresource associated with the object.

\n

To use this operation, you must have permission to perform the\n s3:GetObjectTagging action. By default, the GET action returns information\n about current version of an object. For a versioned bucket, you can have multiple versions\n of an object in your bucket. To retrieve tags of any other version, use the versionId query\n parameter. You also need permission for the s3:GetObjectVersionTagging\n action.

\n

By default, the bucket owner has this permission and can grant this permission to\n others.

\n

For information about the Amazon S3 object tagging feature, see Object Tagging.

\n

The following actions are related to GetObjectTagging:

\n ", + "smithy.api#documentation": "\n

This operation is not supported by directory buckets.

\n
\n

Returns the tag-set of an object. You send the GET request against the tagging\n subresource associated with the object.

\n

To use this operation, you must have permission to perform the\n s3:GetObjectTagging action. By default, the GET action returns information\n about current version of an object. For a versioned bucket, you can have multiple versions\n of an object in your bucket. To retrieve tags of any other version, use the versionId query\n parameter. You also need permission for the s3:GetObjectVersionTagging\n action.

\n

By default, the bucket owner has this permission and can grant this permission to\n others.

\n

For information about the Amazon S3 object tagging feature, see Object Tagging.

\n

The following actions are related to GetObjectTagging:

\n ", "smithy.api#examples": [ { "title": "To retrieve tag set of a specific object version", @@ -22052,7 +24057,7 @@ "Bucket": { "target": "com.amazonaws.s3#BucketName", "traits": { - "smithy.api#documentation": "

The bucket name containing the object for which to get the tagging information.

\n

When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.

\n

When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form \n AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide.

", + "smithy.api#documentation": "

The bucket name containing the object for which to get the tagging information.

\n

\n Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.

\n

\n S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form \n AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide.

", "smithy.api#httpLabel": {}, "smithy.api#required": {}, "smithy.rules#contextParam": { @@ -22078,7 +24083,7 @@ "ExpectedBucketOwner": { "target": "com.amazonaws.s3#AccountId", "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

", + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } }, @@ -22102,7 +24107,7 @@ "target": "com.amazonaws.s3#GetObjectTorrentOutput" }, "traits": { - "smithy.api#documentation": "

Returns torrent files from a bucket. BitTorrent can save you bandwidth when you're\n distributing large files.

\n \n

You can get torrent only for objects that are less than 5 GB in size, and that are\n not encrypted using server-side encryption with a customer-provided encryption\n key.

\n
\n

To use GET, you must have READ access to the object.

\n

This action is not supported by Amazon S3 on Outposts.

\n

The following action is related to GetObjectTorrent:

\n ", + "smithy.api#documentation": "\n

This operation is not supported by directory buckets.

\n
\n

Returns torrent files from a bucket. BitTorrent can save you bandwidth when you're\n distributing large files.

\n \n

You can get torrent only for objects that are less than 5 GB in size, and that are\n not encrypted using server-side encryption with a customer-provided encryption\n key.

\n
\n

To use GET, you must have READ access to the object.

\n

This functionality is not supported for Amazon S3 on Outposts.

\n

The following action is related to GetObjectTorrent:

\n ", "smithy.api#examples": [ { "title": "To retrieve torrent files for an object", @@ -22174,7 +24179,7 @@ "ExpectedBucketOwner": { "target": "com.amazonaws.s3#AccountId", "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

", + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } } @@ -22192,11 +24197,16 @@ "target": "com.amazonaws.s3#GetPublicAccessBlockOutput" }, "traits": { - "smithy.api#documentation": "

Retrieves the PublicAccessBlock configuration for an Amazon S3 bucket. To use\n this operation, you must have the s3:GetBucketPublicAccessBlock permission.\n For more information about Amazon S3 permissions, see Specifying Permissions in a\n Policy.

\n \n

When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket or\n an object, it checks the PublicAccessBlock configuration for both the\n bucket (or the bucket that contains the object) and the bucket owner's account. If the\n PublicAccessBlock settings are different between the bucket and the\n account, Amazon S3 uses the most restrictive combination of the bucket-level and\n account-level settings.

\n
\n

For more information about when Amazon S3 considers a bucket or an object public, see The Meaning of \"Public\".

\n

The following operations are related to GetPublicAccessBlock:

\n ", + "smithy.api#documentation": "\n

This operation is not supported by directory buckets.

\n
\n

Retrieves the PublicAccessBlock configuration for an Amazon S3 bucket. To use\n this operation, you must have the s3:GetBucketPublicAccessBlock permission.\n For more information about Amazon S3 permissions, see Specifying Permissions in a\n Policy.

\n \n

When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket or\n an object, it checks the PublicAccessBlock configuration for both the\n bucket (or the bucket that contains the object) and the bucket owner's account. If the\n PublicAccessBlock settings are different between the bucket and the\n account, Amazon S3 uses the most restrictive combination of the bucket-level and\n account-level settings.

\n
\n

For more information about when Amazon S3 considers a bucket or an object public, see The Meaning of \"Public\".

\n

The following operations are related to GetPublicAccessBlock:

\n ", "smithy.api#http": { "method": "GET", "uri": "/{Bucket}?publicAccessBlock", "code": 200 + }, + "smithy.rules#staticContextParams": { + "UseS3ExpressControlEndpoint": { + "value": true + } } } }, @@ -22232,7 +24242,7 @@ "ExpectedBucketOwner": { "target": "com.amazonaws.s3#AccountId", "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

", + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } } @@ -22351,7 +24361,7 @@ "target": "com.amazonaws.s3#HeadBucketRequest" }, "output": { - "target": "smithy.api#Unit" + "target": "com.amazonaws.s3#HeadBucketOutput" }, "errors": [ { @@ -22359,7 +24369,7 @@ } ], "traits": { - "smithy.api#documentation": "

This action is useful to determine if a bucket exists and you have permission to access\n it. The action returns a 200 OK if the bucket exists and you have permission\n to access it.

\n

If the bucket does not exist or you do not have permission to access it, the\n HEAD request returns a generic 400 Bad Request, 403\n Forbidden or 404 Not Found code. A message body is not included, so\n you cannot determine the exception beyond these error codes.

\n

To use this operation, you must have permissions to perform the\n s3:ListBucket action. The bucket owner has this permission by default and\n can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing\n Access Permissions to Your Amazon S3 Resources.

\n

To use this API operation against an access point, you must provide the alias of the access point in\n place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct\n requests to the access point hostname. The access point hostname takes the form\n AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.\n When using the Amazon Web Services SDKs, you provide the ARN in place of the bucket name. For more\n information, see Using access points.

\n

To use this API operation against an Object Lambda access point, provide the alias of the Object Lambda access point in place of the bucket name. \nIf the Object Lambda access point alias in a request is not valid, the error code InvalidAccessPointAliasError is returned. \nFor more information about InvalidAccessPointAliasError, see List of\n Error Codes.

", + "smithy.api#documentation": "

You can use this operation to determine if a bucket exists and if you have permission to access it. The action returns a 200 OK if the bucket exists and you have permission\n to access it.

\n

If the bucket does not exist or you do not have permission to access it, the\n HEAD request returns a generic 400 Bad Request, 403\n Forbidden or 404 Not Found code. A message body is not included, so\n you cannot determine the exception beyond these error codes.

\n \n

\n Directory buckets - You must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the\n Amazon S3 User Guide.

\n
\n
\n
Authentication and authorization
\n
\n

All HeadBucket requests must be authenticated and signed by using IAM credentials (access key ID and secret access key for the IAM identities). All headers with the x-amz- prefix, including\n x-amz-copy-source, must be signed. For more information, see REST Authentication.

\n

\n Directory bucket - You must use IAM credentials to authenticate and authorize your access to the HeadBucket API operation, instead of using the \n temporary security credentials through the CreateSession API operation.

\n

Amazon Web Services CLI or SDKs handles authentication and authorization on your behalf.

\n
\n
Permissions
\n
\n

\n \n
\n
HTTP Host header syntax
\n
\n

\n Directory buckets - The HTTP Host header syntax is \n Bucket_name.s3express-az_id.region.amazonaws.com.

\n
\n
", "smithy.api#examples": [ { "title": "To determine if bucket exists", @@ -22406,13 +24416,49 @@ } } }, + "com.amazonaws.s3#HeadBucketOutput": { + "type": "structure", + "members": { + "BucketLocationType": { + "target": "com.amazonaws.s3#LocationType", + "traits": { + "smithy.api#documentation": "

The type of location where the bucket is created. This functionality is only supported by directory buckets.",
+          "smithy.api#httpHeader": "x-amz-bucket-location-type"
+        }
+      },
+      "BucketLocationName": {
+        "target": "com.amazonaws.s3#BucketLocationName",
+        "traits": {
+          "smithy.api#documentation": "The name of the location where the bucket will be created. For directory buckets, the AZ ID of the Availability Zone where the bucket is created. An example AZ ID value is usw2-az2. This functionality is only supported by directory buckets.",
+          "smithy.api#httpHeader": "x-amz-bucket-location-name"
+        }
+      },
+      "BucketRegion": {
+        "target": "com.amazonaws.s3#Region",
+        "traits": {
+          "smithy.api#documentation": "The Region that the bucket is located. This functionality is not supported for directory buckets.",
+          "smithy.api#httpHeader": "x-amz-bucket-region"
+        }
+      },
+      "AccessPointAlias": {
+        "target": "com.amazonaws.s3#AccessPointAlias",
+        "traits": {
+          "smithy.api#documentation": "Indicates whether the bucket name used in the request is an access point alias. This functionality is not supported for directory buckets.",
+          "smithy.api#httpHeader": "x-amz-access-point-alias"
+        }
+      }
+    },
+    "traits": {
+      "smithy.api#output": {}
+    }
+  },

    "com.amazonaws.s3#HeadBucketRequest": {
      "type": "structure",
      "members": {
        "Bucket": {
          "target": "com.amazonaws.s3#BucketName",
          "traits": {
-          "smithy.api#documentation": "The bucket name. When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide. When you use this action with an Object Lambda access point, provide the alias of the Object Lambda access point in place of the bucket name. If the Object Lambda access point alias in a request is not valid, the error code InvalidAccessPointAliasError is returned. For more information about InvalidAccessPointAliasError, see List of Error Codes. When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide.",
+          "smithy.api#documentation": "The bucket name. Directory buckets - When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. Path-style requests are not supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide. Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide. Object Lambda access points - When you use this API operation with an Object Lambda access point, provide the alias of the Object Lambda access point in place of the bucket name. If the Object Lambda access point alias in a request is not valid, the error code InvalidAccessPointAliasError is returned. For more information about InvalidAccessPointAliasError, see List of Error Codes. Access points and Object Lambda access points are not supported by directory buckets. S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide.",
          "smithy.api#httpLabel": {},
          "smithy.api#required": {},
          "smithy.rules#contextParam": {
@@ -22423,7 +24469,7 @@
      "ExpectedBucketOwner": {
        "target": "com.amazonaws.s3#AccountId",
        "traits": {

-          "smithy.api#documentation": "The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).",
+          "smithy.api#documentation": "The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).",
          "smithy.api#httpHeader": "x-amz-expected-bucket-owner"
        }
      }
@@ -22446,7 +24492,7 @@
      } ],
      "traits": {
-          "smithy.api#documentation": "

The HEAD action retrieves metadata from an object without returning the\n object itself. This action is useful if you're only interested in an object's metadata. To\n use HEAD, you must have READ access to the object.

\n

A HEAD request has the same options as a GET action on an\n object. The response is identical to the GET response except that there is no\n response body. Because of this, if the HEAD request generates an error, it\n returns a generic code, such as 400 Bad Request, 403 Forbidden, 404 Not\n Found, 405 Method Not Allowed, 412 Precondition Failed, or 304 Not Modified. \n It's not possible to retrieve the exact exception of these error codes.

\n

If you encrypt an object by using server-side encryption with customer-provided\n encryption keys (SSE-C) when you store the object in Amazon S3, then when you retrieve the\n metadata from the object, you must use the following headers:

\n
    \n
  • \n

    \n x-amz-server-side-encryption-customer-algorithm\n

    \n
  • \n
  • \n

    \n x-amz-server-side-encryption-customer-key\n

    \n
  • \n
  • \n

    \n x-amz-server-side-encryption-customer-key-MD5\n

    \n
  • \n
\n

For more information about SSE-C, see Server-Side Encryption\n (Using Customer-Provided Encryption Keys).

    • Encryption request headers, like x-amz-server-side-encryption, should not be sent for GET requests if your object uses server-side encryption with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with Amazon S3 managed encryption keys (SSE-S3). If your object does use these types of keys, you’ll get an HTTP 400 Bad Request error.
    • The last modified property in this case is the creation date of the object.

Request headers are limited to 8 KB in size. For more information, see Common\n Request Headers.

\n

Consider the following when using request headers:

    • Consideration 1 – If both of the If-Match and If-Unmodified-Since headers are present in the request as follows:
        • If-Match condition evaluates to true, and;
        • If-Unmodified-Since condition evaluates to false;
      Then Amazon S3 returns 200 OK and the data requested.
    • Consideration 2 – If both of the If-None-Match and If-Modified-Since headers are present in the request as follows:
        • If-None-Match condition evaluates to false, and;
        • If-Modified-Since condition evaluates to true;
      Then Amazon S3 returns the 304 Not Modified response code.

For more information about conditional requests, see RFC 7232.

\n
\n
Permissions
\n
\n

You need the relevant read object (or version) permission for this operation.\n For more information, see Actions, resources, and condition\n keys for Amazon S3. If the object you request doesn't exist, the error that\n Amazon S3 returns depends on whether you also have the s3:ListBucket permission.

    • If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an HTTP status code 404 error.
    • If you don’t have the s3:ListBucket permission, Amazon S3 returns an HTTP status code 403 error.

Versioning
    • If the current version of the object is a delete marker, Amazon S3 behaves as if the object was deleted and includes x-amz-delete-marker: true in the response.
    • If the specified version is a delete marker, the response returns a 405 (Method Not Allowed) error and the Last-Modified: timestamp response header.

The following actions are related to HeadObject:

\n ", + "smithy.api#documentation": "

The HEAD operation retrieves metadata from an object without returning the\n object itself. This operation is useful if you're interested only in an object's metadata.

\n

A HEAD request has the same options as a GET operation on an\n object. The response is identical to the GET response except that there is no\n response body. Because of this, if the HEAD request generates an error, it\n returns a generic code, such as 400 Bad Request, 403 Forbidden, 404 Not\n Found, 405 Method Not Allowed, 412 Precondition Failed, or 304 Not Modified. \n It's not possible to retrieve the exact exception of these error codes.

\n

Request headers are limited to 8 KB in size. For more information, see Common\n Request Headers.

\n \n

\n Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name\n . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the\n Amazon S3 User Guide.

\n
\n
\n
Permissions
\n
\n

    • General purpose bucket permissions - To use HEAD, you must have the s3:GetObject permission. You need the relevant read object (or version) permission for this operation. For more information, see Actions, resources, and condition keys for Amazon S3 in the Amazon S3 User Guide.
      If the object you request doesn't exist, the error that Amazon S3 returns depends on whether you also have the s3:ListBucket permission.
        • If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an HTTP status code 404 Not Found error.
        • If you don’t have the s3:ListBucket permission, Amazon S3 returns an HTTP status code 403 Forbidden error.
    • Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession.

Encryption
\n
\n \n

Encryption request headers, like x-amz-server-side-encryption,\n should not be sent for HEAD requests if your object uses server-side\n encryption with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side\n encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with Amazon S3\n managed encryption keys (SSE-S3). The x-amz-server-side-encryption header is used when you PUT an object to S3 and want to specify the encryption method. \n If you include this header in a HEAD request for an object that uses these types of keys, \n you’ll get an HTTP 400 Bad Request error. It's because the encryption method can't be changed when you retrieve the object.

\n
\n

If you encrypt an object by using server-side encryption with customer-provided\n encryption keys (SSE-C) when you store the object in Amazon S3, then when you retrieve the\n metadata from the object, you must use the following headers to provide the encryption key for the server to be able to retrieve the object's metadata. The headers are:

    • x-amz-server-side-encryption-customer-algorithm
    • x-amz-server-side-encryption-customer-key
    • x-amz-server-side-encryption-customer-key-MD5

For more information about SSE-C, see Server-Side Encryption\n (Using Customer-Provided Encryption Keys) in the Amazon S3\n User Guide.

\n \n

\n Directory bucket permissions - For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) is supported.

\n
\n
\n
Versioning
\n
    • If the current version of the object is a delete marker, Amazon S3 behaves as if the object was deleted and includes x-amz-delete-marker: true in the response.
    • If the specified version is a delete marker, the response returns a 405 Method Not Allowed error and the Last-Modified: timestamp response header.

    • Directory buckets - Delete marker is not supported by directory buckets.
    • Directory buckets - S3 Versioning isn't enabled and supported for directory buckets. For this API operation, only the null value of the version ID is supported by directory buckets. You can only specify null to the versionId query parameter in the request.

HTTP Host header syntax
\n
\n

\n Directory buckets - The HTTP Host header syntax is \n Bucket_name.s3express-az_id.region.amazonaws.com.

\n
\n
\n

The following actions are related to HeadObject:

\n ", "smithy.api#http": { "method": "HEAD", "uri": "/{Bucket}/{Key+}", @@ -22490,7 +24536,7 @@ "DeleteMarker": { "target": "com.amazonaws.s3#DeleteMarker", "traits": { - "smithy.api#documentation": "

Specifies whether the object retrieved was (true) or was not (false) a Delete Marker. If\n false, this response header does not appear in the response.

", + "smithy.api#documentation": "

Specifies whether the object retrieved was (true) or was not (false) a Delete Marker. If\n false, this response header does not appear in the response.

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-delete-marker" } }, @@ -22504,21 +24550,21 @@ "Expiration": { "target": "com.amazonaws.s3#Expiration", "traits": { - "smithy.api#documentation": "

If the object expiration is configured (see PUT Bucket lifecycle), the response includes\n this header. It includes the expiry-date and rule-id key-value\n pairs providing object expiration information. The value of the rule-id is\n URL-encoded.

", + "smithy.api#documentation": "

If the object expiration is configured (see \n PutBucketLifecycleConfiguration\n ), the response includes\n this header. It includes the expiry-date and rule-id key-value\n pairs providing object expiration information. The value of the rule-id is\n URL-encoded.

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-expiration" } }, "Restore": { "target": "com.amazonaws.s3#Restore", "traits": { - "smithy.api#documentation": "

If the object is an archived object (an object whose storage class is GLACIER), the response includes this header if either the archive restoration is in progress (see RestoreObject) or an archive copy is already restored.

\n

If an archive copy is already restored, the header value indicates when Amazon S3 is\n scheduled to delete the object copy. For example:

\n

\n x-amz-restore: ongoing-request=\"false\", expiry-date=\"Fri, 21 Dec 2012 00:00:00\n GMT\"\n

\n

If the object restoration is in progress, the header returns the value\n ongoing-request=\"true\".

\n

For more information about archiving objects, see Transitioning Objects: General Considerations.

", + "smithy.api#documentation": "

If the object is an archived object (an object whose storage class is GLACIER), the response includes this header if either the archive restoration is in progress (see RestoreObject) or an archive copy is already restored.

\n

If an archive copy is already restored, the header value indicates when Amazon S3 is\n scheduled to delete the object copy. For example:

\n

\n x-amz-restore: ongoing-request=\"false\", expiry-date=\"Fri, 21 Dec 2012 00:00:00\n GMT\"\n

\n

If the object restoration is in progress, the header returns the value\n ongoing-request=\"true\".

\n

For more information about archiving objects, see Transitioning Objects: General Considerations.

\n \n

This functionality is not supported for directory buckets. Only the S3 Express One Zone storage class is supported by directory buckets to store objects.

\n
", "smithy.api#httpHeader": "x-amz-restore" } }, "ArchiveStatus": { "target": "com.amazonaws.s3#ArchiveStatus", "traits": { - "smithy.api#documentation": "

The archive state of the head object.

", + "smithy.api#documentation": "

The archive state of the head object.

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-archive-status" } }, @@ -22539,28 +24585,28 @@ "ChecksumCRC32": { "target": "com.amazonaws.s3#ChecksumCRC32", "traits": { - "smithy.api#documentation": "

The base64-encoded, 32-bit CRC32 checksum of the object. This will only be present if it was uploaded\n with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

", + "smithy.api#documentation": "

The base64-encoded, 32-bit CRC32 checksum of the object. This will only be present if it was uploaded\n with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

", "smithy.api#httpHeader": "x-amz-checksum-crc32" } }, "ChecksumCRC32C": { "target": "com.amazonaws.s3#ChecksumCRC32C", "traits": { - "smithy.api#documentation": "

The base64-encoded, 32-bit CRC32C checksum of the object. This will only be present if it was uploaded\n with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

", + "smithy.api#documentation": "

The base64-encoded, 32-bit CRC32C checksum of the object. This will only be present if it was uploaded\n with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

", "smithy.api#httpHeader": "x-amz-checksum-crc32c" } }, "ChecksumSHA1": { "target": "com.amazonaws.s3#ChecksumSHA1", "traits": { - "smithy.api#documentation": "

The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded\n with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

", + "smithy.api#documentation": "

The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded\n with the object. When you use the API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

", "smithy.api#httpHeader": "x-amz-checksum-sha1" } }, "ChecksumSHA256": { "target": "com.amazonaws.s3#ChecksumSHA256", "traits": { - "smithy.api#documentation": "

The base64-encoded, 256-bit SHA-256 digest of the object. This will only be present if it was uploaded\n with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

", + "smithy.api#documentation": "

The base64-encoded, 256-bit SHA-256 digest of the object. This will only be present if it was uploaded\n with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

", "smithy.api#httpHeader": "x-amz-checksum-sha256" } }, @@ -22574,14 +24620,14 @@ "MissingMeta": { "target": "com.amazonaws.s3#MissingMeta", "traits": { - "smithy.api#documentation": "

This is set to the number of metadata entries not returned in x-amz-meta\n headers. This can happen if you create metadata using an API like SOAP that supports more\n flexible metadata than the REST API. For example, using SOAP, you can create metadata whose\n values are not legal HTTP headers.

", + "smithy.api#documentation": "

This is set to the number of metadata entries not returned in x-amz-meta\n headers. This can happen if you create metadata using an API like SOAP that supports more\n flexible metadata than the REST API. For example, using SOAP, you can create metadata whose\n values are not legal HTTP headers.

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-missing-meta" } }, "VersionId": { "target": "com.amazonaws.s3#ObjectVersionId", "traits": { - "smithy.api#documentation": "

Version of the object.

", + "smithy.api#documentation": "

Version ID of the object.

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-version-id" } }, @@ -22602,7 +24648,7 @@ "ContentEncoding": { "target": "com.amazonaws.s3#ContentEncoding", "traits": { - "smithy.api#documentation": "

Specifies what content encodings have been applied to the object and thus what decoding\n mechanisms must be applied to obtain the media-type referenced by the Content-Type header\n field.

", + "smithy.api#documentation": "

Indicates what content encodings have been applied to the object and thus what decoding\n mechanisms must be applied to obtain the media-type referenced by the Content-Type header\n field.

", "smithy.api#httpHeader": "Content-Encoding" } }, @@ -22630,14 +24676,14 @@ "WebsiteRedirectLocation": { "target": "com.amazonaws.s3#WebsiteRedirectLocation", "traits": { - "smithy.api#documentation": "

If the bucket is configured as a website, redirects requests for this object to another\n object in the same bucket or to an external URL. Amazon S3 stores the value of this header in\n the object metadata.

", + "smithy.api#documentation": "

If the bucket is configured as a website, redirects requests for this object to another\n object in the same bucket or to an external URL. Amazon S3 stores the value of this header in\n the object metadata.

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-website-redirect-location" } }, "ServerSideEncryption": { "target": "com.amazonaws.s3#ServerSideEncryption", "traits": { - "smithy.api#documentation": "

The server-side encryption algorithm used when storing this object in Amazon S3 (for example,\n AES256, aws:kms, aws:kms:dsse).

", + "smithy.api#documentation": "

The server-side encryption algorithm used when you store this object in Amazon S3 (for example,\n AES256, aws:kms, aws:kms:dsse).

\n \n

For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) is supported.

\n
", "smithy.api#httpHeader": "x-amz-server-side-encryption" } }, @@ -22651,35 +24697,35 @@ "SSECustomerAlgorithm": { "target": "com.amazonaws.s3#SSECustomerAlgorithm", "traits": { - "smithy.api#documentation": "

If server-side encryption with a customer-provided encryption key was requested, the\n response will include this header confirming the encryption algorithm used.

", + "smithy.api#documentation": "

If server-side encryption with a customer-provided encryption key was requested, the\n response will include this header to confirm the encryption algorithm that's used.

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-server-side-encryption-customer-algorithm" } }, "SSECustomerKeyMD5": { "target": "com.amazonaws.s3#SSECustomerKeyMD5", "traits": { - "smithy.api#documentation": "

If server-side encryption with a customer-provided encryption key was requested, the\n response will include this header to provide round-trip message integrity verification of\n the customer-provided encryption key.

", + "smithy.api#documentation": "

If server-side encryption with a customer-provided encryption key was requested, the\n response will include this header to provide the round-trip message integrity verification of\n the customer-provided encryption key.

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-server-side-encryption-customer-key-MD5" } }, "SSEKMSKeyId": { "target": "com.amazonaws.s3#SSEKMSKeyId", "traits": { - "smithy.api#documentation": "

If present, specifies the ID of the Key Management Service (KMS) symmetric encryption customer managed key\n that was used for the object.

", + "smithy.api#documentation": "

If present, indicates the ID of the Key Management Service (KMS) symmetric encryption customer managed key\n that was used for the object.

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-server-side-encryption-aws-kms-key-id" } }, "BucketKeyEnabled": { "target": "com.amazonaws.s3#BucketKeyEnabled", "traits": { - "smithy.api#documentation": "

Indicates whether the object uses an S3 Bucket Key for server-side encryption with\n Key Management Service (KMS) keys (SSE-KMS).

", + "smithy.api#documentation": "

Indicates whether the object uses an S3 Bucket Key for server-side encryption\n with Key Management Service (KMS) keys (SSE-KMS).

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-server-side-encryption-bucket-key-enabled" } }, "StorageClass": { "target": "com.amazonaws.s3#StorageClass", "traits": { - "smithy.api#documentation": "

Provides storage class information of the object. Amazon S3 returns this header for all\n objects except for S3 Standard storage class objects.

\n

For more information, see Storage Classes.

", + "smithy.api#documentation": "

Provides storage class information of the object. Amazon S3 returns this header for all\n objects except for S3 Standard storage class objects.

\n

For more information, see Storage Classes.

\n \n

\n Directory buckets - Only the S3 Express One Zone storage class is supported by directory buckets to store objects.

\n
", "smithy.api#httpHeader": "x-amz-storage-class" } }, @@ -22692,7 +24738,7 @@ "ReplicationStatus": { "target": "com.amazonaws.s3#ReplicationStatus", "traits": { - "smithy.api#documentation": "

Amazon S3 can return this header if your request involves a bucket that is either a source or\n a destination in a replication rule.

\n

In replication, you have a source bucket on which you configure replication and\n destination bucket or buckets where Amazon S3 stores object replicas. When you request an object\n (GetObject) or object metadata (HeadObject) from these\n buckets, Amazon S3 will return the x-amz-replication-status header in the response\n as follows:

    • If requesting an object from the source bucket, Amazon S3 will return the x-amz-replication-status header if the object in your request is eligible for replication.
      For example, suppose that in your replication configuration, you specify object prefix TaxDocs requesting Amazon S3 to replicate objects with key prefix TaxDocs. Any objects you upload with this key name prefix, for example TaxDocs/document1.pdf, are eligible for replication. For any object request with this key name prefix, Amazon S3 will return the x-amz-replication-status header with value PENDING, COMPLETED or FAILED indicating object replication status.
    • If requesting an object from a destination bucket, Amazon S3 will return the x-amz-replication-status header with value REPLICA if the object in your request is a replica that Amazon S3 created and there is no replica modification replication in progress.
    • When replicating objects to multiple destination buckets, the x-amz-replication-status header acts differently. The header of the source object will only return a value of COMPLETED when replication is successful to all destinations. The header will remain at value PENDING until replication has completed for all destinations. If one or more destinations fail replication, the header will return FAILED.

For more information, see Replication.

", + "smithy.api#documentation": "

Amazon S3 can return this header if your request involves a bucket that is either a source or\n a destination in a replication rule.

\n

In replication, you have a source bucket on which you configure replication and\n destination bucket or buckets where Amazon S3 stores object replicas. When you request an object\n (GetObject) or object metadata (HeadObject) from these\n buckets, Amazon S3 will return the x-amz-replication-status header in the response\n as follows:

    • If requesting an object from the source bucket, Amazon S3 will return the x-amz-replication-status header if the object in your request is eligible for replication.
      For example, suppose that in your replication configuration, you specify object prefix TaxDocs requesting Amazon S3 to replicate objects with key prefix TaxDocs. Any objects you upload with this key name prefix, for example TaxDocs/document1.pdf, are eligible for replication. For any object request with this key name prefix, Amazon S3 will return the x-amz-replication-status header with value PENDING, COMPLETED or FAILED indicating object replication status.
    • If requesting an object from a destination bucket, Amazon S3 will return the x-amz-replication-status header with value REPLICA if the object in your request is a replica that Amazon S3 created and there is no replica modification replication in progress.
    • When replicating objects to multiple destination buckets, the x-amz-replication-status header acts differently. The header of the source object will only return a value of COMPLETED when replication is successful to all destinations. The header will remain at value PENDING until replication has completed for all destinations. If one or more destinations fail replication, the header will return FAILED.

For more information, see Replication.

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-replication-status" } }, @@ -22706,21 +24752,21 @@ "ObjectLockMode": { "target": "com.amazonaws.s3#ObjectLockMode", "traits": { - "smithy.api#documentation": "

The Object Lock mode, if any, that's in effect for this object. This header is only\n returned if the requester has the s3:GetObjectRetention permission. For more\n information about S3 Object Lock, see Object Lock.

", + "smithy.api#documentation": "

The Object Lock mode, if any, that's in effect for this object. This header is only\n returned if the requester has the s3:GetObjectRetention permission. For more\n information about S3 Object Lock, see Object Lock.

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-object-lock-mode" } }, "ObjectLockRetainUntilDate": { "target": "com.amazonaws.s3#ObjectLockRetainUntilDate", "traits": { - "smithy.api#documentation": "

The date and time when the Object Lock retention period expires. This header is only\n returned if the requester has the s3:GetObjectRetention permission.

", + "smithy.api#documentation": "

The date and time when the Object Lock retention period expires. This header is only\n returned if the requester has the s3:GetObjectRetention permission.

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-object-lock-retain-until-date" } }, "ObjectLockLegalHoldStatus": { "target": "com.amazonaws.s3#ObjectLockLegalHoldStatus", "traits": { - "smithy.api#documentation": "

Specifies whether a legal hold is in effect for this object. This header is only\n returned if the requester has the s3:GetObjectLegalHold permission. This\n header is not returned if the specified version of this object has never had a legal hold\n applied. For more information about S3 Object Lock, see Object Lock.

", + "smithy.api#documentation": "

Specifies whether a legal hold is in effect for this object. This header is only\n returned if the requester has the s3:GetObjectLegalHold permission. This\n header is not returned if the specified version of this object has never had a legal hold\n applied. For more information about S3 Object Lock, see Object Lock.

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-object-lock-legal-hold" } } @@ -22735,7 +24781,7 @@ "Bucket": { "target": "com.amazonaws.s3#BucketName", "traits": { - "smithy.api#documentation": "

The name of the bucket containing the object.

\n

When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.

\n

When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form \n AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide.

", + "smithy.api#documentation": "

The name of the bucket that contains the object.

\n

\n Directory buckets - When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format \n Bucket_name.s3express-az_id.region.amazonaws.com. Path-style requests are not supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must follow the format \n bucket_base_name--az-id--x-s3 (for example, \n DOC-EXAMPLE-BUCKET--usw2-az2--x-s3). For information about bucket naming\n restrictions, see Directory bucket naming\n rules in the Amazon S3 User Guide.

\n

\n Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.

\n \n

Access points and Object Lambda access points are not supported by directory buckets.

\n
\n

\n S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form \n AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide.

", "smithy.api#httpLabel": {}, "smithy.api#required": {}, "smithy.rules#contextParam": { @@ -22746,28 +24792,28 @@ "IfMatch": { "target": "com.amazonaws.s3#IfMatch", "traits": { - "smithy.api#documentation": "

Return the object only if its entity tag (ETag) is the same as the one specified;\n otherwise, return a 412 (precondition failed) error.

", + "smithy.api#documentation": "

Return the object only if its entity tag (ETag) is the same as the one specified;\n otherwise, return a 412 (precondition failed) error.

\n

If both of the If-Match and\n If-Unmodified-Since headers are present in the request as\n follows:

    • If-Match condition evaluates to true, and;
    • If-Unmodified-Since condition evaluates to false;

Then Amazon S3 returns 200 OK and the data requested.

\n

For more information about conditional requests, see RFC 7232.

", "smithy.api#httpHeader": "If-Match" } }, "IfModifiedSince": { "target": "com.amazonaws.s3#IfModifiedSince", "traits": { - "smithy.api#documentation": "

Return the object only if it has been modified since the specified time; otherwise,\n return a 304 (not modified) error.

", + "smithy.api#documentation": "

Return the object only if it has been modified since the specified time; otherwise,\n return a 304 (not modified) error.

\n

If both of the If-None-Match and\n If-Modified-Since headers are present in the request as\n follows:

    • If-None-Match condition evaluates to false, and;
    • If-Modified-Since condition evaluates to true;

Then Amazon S3 returns the 304 Not Modified response code.

\n

For more information about conditional requests, see RFC 7232.

", "smithy.api#httpHeader": "If-Modified-Since" } }, "IfNoneMatch": { "target": "com.amazonaws.s3#IfNoneMatch", "traits": { - "smithy.api#documentation": "

Return the object only if its entity tag (ETag) is different from the one specified;\n otherwise, return a 304 (not modified) error.

", + "smithy.api#documentation": "

Return the object only if its entity tag (ETag) is different from the one specified;\n otherwise, return a 304 (not modified) error.

\n

If both of the If-None-Match and\n If-Modified-Since headers are present in the request as\n follows:

    • If-None-Match condition evaluates to false, and;
    • If-Modified-Since condition evaluates to true;

Then Amazon S3 returns the 304 Not Modified response code.

\n

For more information about conditional requests, see RFC 7232.

", "smithy.api#httpHeader": "If-None-Match" } }, "IfUnmodifiedSince": { "target": "com.amazonaws.s3#IfUnmodifiedSince", "traits": { - "smithy.api#documentation": "

Return the object only if it has not been modified since the specified time; otherwise,\n return a 412 (precondition failed) error.

", + "smithy.api#documentation": "

Return the object only if it has not been modified since the specified time; otherwise,\n return a 412 (precondition failed) error.

\n

If both of the If-Match and\n If-Unmodified-Since headers are present in the request as\n follows:

    • If-Match condition evaluates to true, and;
    • If-Unmodified-Since condition evaluates to false;

Then Amazon S3 returns 200 OK and the data requested.

\n

For more information about conditional requests, see RFC 7232.

", "smithy.api#httpHeader": "If-Unmodified-Since" } }, @@ -22792,28 +24838,28 @@ "VersionId": { "target": "com.amazonaws.s3#ObjectVersionId", "traits": { - "smithy.api#documentation": "

VersionId used to reference a specific version of the object.

", + "smithy.api#documentation": "

Version ID used to reference a specific version of the object.

\n \n

For directory buckets in this API operation, only the null value of the version ID is supported.

\n
", "smithy.api#httpQuery": "versionId" } }, "SSECustomerAlgorithm": { "target": "com.amazonaws.s3#SSECustomerAlgorithm", "traits": { - "smithy.api#documentation": "

Specifies the algorithm to use to when encrypting the object (for example,\n AES256).

", + "smithy.api#documentation": "

Specifies the algorithm to use when encrypting the object (for example,\n AES256).

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-server-side-encryption-customer-algorithm" } }, "SSECustomerKey": { "target": "com.amazonaws.s3#SSECustomerKey", "traits": { - "smithy.api#documentation": "

Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This\n value is used to store the object and then it is discarded; Amazon S3 does not store the\n encryption key. The key must be appropriate for use with the algorithm specified in the\n x-amz-server-side-encryption-customer-algorithm header.

", + "smithy.api#documentation": "

Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This\n value is used to store the object and then it is discarded; Amazon S3 does not store the\n encryption key. The key must be appropriate for use with the algorithm specified in the\n x-amz-server-side-encryption-customer-algorithm header.

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-server-side-encryption-customer-key" } }, "SSECustomerKeyMD5": { "target": "com.amazonaws.s3#SSECustomerKeyMD5", "traits": { - "smithy.api#documentation": "

Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses\n this header for a message integrity check to ensure that the encryption key was transmitted\n without error.

", + "smithy.api#documentation": "

Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses\n this header for a message integrity check to ensure that the encryption key was transmitted\n without error.

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-server-side-encryption-customer-key-MD5" } }, @@ -22833,7 +24879,7 @@ "ExpectedBucketOwner": { "target": "com.amazonaws.s3#AccountId", "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

", + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } }, @@ -22897,13 +24943,13 @@ "ID": { "target": "com.amazonaws.s3#ID", "traits": { - "smithy.api#documentation": "

If the principal is an Amazon Web Services account, it provides the Canonical User ID. If the\n principal is an IAM User, it provides a user ARN value.

" + "smithy.api#documentation": "

If the principal is an Amazon Web Services account, it provides the Canonical User ID. If the\n principal is an IAM User, it provides a user ARN value.

\n \n

\n Directory buckets - If the principal is an Amazon Web Services account, it provides the Amazon Web Services account ID. If the\n principal is an IAM User, it provides a user ARN value.

\n
" } }, "DisplayName": { "target": "com.amazonaws.s3#DisplayName", "traits": { - "smithy.api#documentation": "

Name of the Principal.

" + "smithy.api#documentation": "

Name of the Principal.

\n \n

This functionality is not supported for directory buckets.

\n
" } } }, @@ -23082,8 +25128,9 @@ } }, "traits": { - "smithy.api#documentation": "

Object is archived and inaccessible until restored.

", - "smithy.api#error": "client" + "smithy.api#documentation": "

Object is archived and inaccessible until restored.

\n

If the object you are retrieving is stored in the S3 Glacier Flexible Retrieval storage class, the \n S3 Glacier Deep Archive storage class, the S3 Intelligent-Tiering Archive Access tier, or the \n S3 Intelligent-Tiering Deep Archive Access tier, before you can retrieve the object you must first restore a\n copy using RestoreObject. Otherwise, this operation returns an\n InvalidObjectState error. For information about restoring archived objects,\n see Restoring\n Archived Objects in the Amazon S3 User Guide.

", + "smithy.api#error": "client", + "smithy.api#httpError": 403 } }, "com.amazonaws.s3#InventoryConfiguration": { @@ -23705,11 +25752,16 @@ "target": "com.amazonaws.s3#ListBucketAnalyticsConfigurationsOutput" }, "traits": { - "smithy.api#documentation": "

Lists the analytics configurations for the bucket. You can have up to 1,000 analytics\n configurations per bucket.

\n

This action supports list pagination and does not return more than 100 configurations at\n a time. You should always check the IsTruncated element in the response. If\n there are no more configurations to list, IsTruncated is set to false. If\n there are more configurations to list, IsTruncated is set to true, and there\n will be a value in NextContinuationToken. You use the\n NextContinuationToken value to continue the pagination of the list by\n passing the value in continuation-token in the request to GET the next\n page.

\n

To use this operation, you must have permissions to perform the\n s3:GetAnalyticsConfiguration action. The bucket owner has this permission\n by default. The bucket owner can grant this permission to others. For more information\n about permissions, see Permissions Related to Bucket Subresource Operations and Managing\n Access Permissions to Your Amazon S3 Resources.

\n

For information about Amazon S3 analytics feature, see Amazon S3 Analytics – Storage Class\n Analysis.

\n

The following operations are related to\n ListBucketAnalyticsConfigurations:

\n ", + "smithy.api#documentation": "\n

This operation is not supported by directory buckets.

\n
\n

Lists the analytics configurations for the bucket. You can have up to 1,000 analytics\n configurations per bucket.

\n

This action supports list pagination and does not return more than 100 configurations at\n a time. You should always check the IsTruncated element in the response. If\n there are no more configurations to list, IsTruncated is set to false. If\n there are more configurations to list, IsTruncated is set to true, and there\n will be a value in NextContinuationToken. You use the\n NextContinuationToken value to continue the pagination of the list by\n passing the value in continuation-token in the request to GET the next\n page.

\n

To use this operation, you must have permissions to perform the\n s3:GetAnalyticsConfiguration action. The bucket owner has this permission\n by default. The bucket owner can grant this permission to others. For more information\n about permissions, see Permissions Related to Bucket Subresource Operations and Managing\n Access Permissions to Your Amazon S3 Resources.

\n

For information about Amazon S3 analytics feature, see Amazon S3 Analytics – Storage Class\n Analysis.

\n

The following operations are related to\n ListBucketAnalyticsConfigurations:

\n ", "smithy.api#http": { "method": "GET", "uri": "/{Bucket}?analytics&x-id=ListBucketAnalyticsConfigurations", "code": 200 + }, + "smithy.rules#staticContextParams": { + "UseS3ExpressControlEndpoint": { + "value": true + } } } }, @@ -23772,7 +25824,7 @@ "ExpectedBucketOwner": { "target": "com.amazonaws.s3#AccountId", "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

", + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } } @@ -23790,11 +25842,16 @@ "target": "com.amazonaws.s3#ListBucketIntelligentTieringConfigurationsOutput" }, "traits": { - "smithy.api#documentation": "

Lists the S3 Intelligent-Tiering configuration from the specified bucket.

\n

The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without performance impact or operational overhead. S3 Intelligent-Tiering delivers automatic cost savings in three low latency and high throughput access tiers. To get the lowest storage cost on data that can be accessed in minutes to hours, you can choose to activate additional archiving capabilities.

\n

The S3 Intelligent-Tiering storage class is the ideal storage class for data with unknown, changing, or unpredictable access patterns, independent of object size or retention period. If the size of an object is less than 128 KB, it is not monitored and not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the Frequent Access tier rates in the S3 Intelligent-Tiering storage class.

\n

For more information, see Storage class for automatically optimizing frequently and infrequently accessed objects.

\n

Operations related to ListBucketIntelligentTieringConfigurations include:

\n ", + "smithy.api#documentation": "\n

This operation is not supported by directory buckets.

\n
\n

Lists the S3 Intelligent-Tiering configuration from the specified bucket.

\n

The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without performance impact or operational overhead. S3 Intelligent-Tiering delivers automatic cost savings in three low latency and high throughput access tiers. To get the lowest storage cost on data that can be accessed in minutes to hours, you can choose to activate additional archiving capabilities.

\n

The S3 Intelligent-Tiering storage class is the ideal storage class for data with unknown, changing, or unpredictable access patterns, independent of object size or retention period. If the size of an object is less than 128 KB, it is not monitored and not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the Frequent Access tier rates in the S3 Intelligent-Tiering storage class.

\n

For more information, see Storage class for automatically optimizing frequently and infrequently accessed objects.

\n

Operations related to ListBucketIntelligentTieringConfigurations include:

\n ", "smithy.api#http": { "method": "GET", "uri": "/{Bucket}?intelligent-tiering&x-id=ListBucketIntelligentTieringConfigurations", "code": 200 + }, + "smithy.rules#staticContextParams": { + "UseS3ExpressControlEndpoint": { + "value": true + } } } }, @@ -23867,11 +25924,16 @@ "target": "com.amazonaws.s3#ListBucketInventoryConfigurationsOutput" }, "traits": { - "smithy.api#documentation": "

Returns a list of inventory configurations for the bucket. You can have up to 1,000\n analytics configurations per bucket.

\n

This action supports list pagination and does not return more than 100 configurations at\n a time. Always check the IsTruncated element in the response. If there are no\n more configurations to list, IsTruncated is set to false. If there are more\n configurations to list, IsTruncated is set to true, and there is a value in\n NextContinuationToken. You use the NextContinuationToken value\n to continue the pagination of the list by passing the value in continuation-token in the\n request to GET the next page.

\n

To use this operation, you must have permissions to perform the\n s3:GetInventoryConfiguration action. The bucket owner has this permission\n by default. The bucket owner can grant this permission to others. For more information\n about permissions, see Permissions Related to Bucket Subresource Operations and Managing\n Access Permissions to Your Amazon S3 Resources.

\n

For information about the Amazon S3 inventory feature, see Amazon S3 Inventory\n

\n

The following operations are related to\n ListBucketInventoryConfigurations:

\n ", + "smithy.api#documentation": "\n

This operation is not supported by directory buckets.

\n
\n

Returns a list of inventory configurations for the bucket. You can have up to 1,000\n analytics configurations per bucket.

\n

This action supports list pagination and does not return more than 100 configurations at\n a time. Always check the IsTruncated element in the response. If there are no\n more configurations to list, IsTruncated is set to false. If there are more\n configurations to list, IsTruncated is set to true, and there is a value in\n NextContinuationToken. You use the NextContinuationToken value\n to continue the pagination of the list by passing the value in continuation-token in the\n request to GET the next page.

\n

To use this operation, you must have permissions to perform the\n s3:GetInventoryConfiguration action. The bucket owner has this permission\n by default. The bucket owner can grant this permission to others. For more information\n about permissions, see Permissions Related to Bucket Subresource Operations and Managing\n Access Permissions to Your Amazon S3 Resources.

\n

For information about the Amazon S3 inventory feature, see Amazon S3 Inventory\n

\n

The following operations are related to\n ListBucketInventoryConfigurations:

\n ", "smithy.api#http": { "method": "GET", "uri": "/{Bucket}?inventory&x-id=ListBucketInventoryConfigurations", "code": 200 + }, + "smithy.rules#staticContextParams": { + "UseS3ExpressControlEndpoint": { + "value": true + } } } }, @@ -23934,7 +25996,7 @@ "ExpectedBucketOwner": { "target": "com.amazonaws.s3#AccountId", "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

", + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } } @@ -23952,7 +26014,7 @@ "target": "com.amazonaws.s3#ListBucketMetricsConfigurationsOutput" }, "traits": { - "smithy.api#documentation": "

Lists the metrics configurations for the bucket. The metrics configurations are only for\n the request metrics of the bucket and do not provide information on daily storage metrics.\n You can have up to 1,000 configurations per bucket.

\n

This action supports list pagination and does not return more than 100 configurations at\n a time. Always check the IsTruncated element in the response. If there are no\n more configurations to list, IsTruncated is set to false. If there are more\n configurations to list, IsTruncated is set to true, and there is a value in\n NextContinuationToken. You use the NextContinuationToken value\n to continue the pagination of the list by passing the value in\n continuation-token in the request to GET the next page.

\n

To use this operation, you must have permissions to perform the\n s3:GetMetricsConfiguration action. The bucket owner has this permission by\n default. The bucket owner can grant this permission to others. For more information about\n permissions, see Permissions Related to Bucket Subresource Operations and Managing\n Access Permissions to Your Amazon S3 Resources.

\n

For more information about metrics configurations and CloudWatch request metrics, see\n Monitoring Metrics with Amazon CloudWatch.

\n

The following operations are related to\n ListBucketMetricsConfigurations:

\n ", + "smithy.api#documentation": "\n

This operation is not supported by directory buckets.

\n
\n

Lists the metrics configurations for the bucket. The metrics configurations are only for\n the request metrics of the bucket and do not provide information on daily storage metrics.\n You can have up to 1,000 configurations per bucket.

\n

This action supports list pagination and does not return more than 100 configurations at\n a time. Always check the IsTruncated element in the response. If there are no\n more configurations to list, IsTruncated is set to false. If there are more\n configurations to list, IsTruncated is set to true, and there is a value in\n NextContinuationToken. You use the NextContinuationToken value\n to continue the pagination of the list by passing the value in\n continuation-token in the request to GET the next page.

\n

To use this operation, you must have permissions to perform the\n s3:GetMetricsConfiguration action. The bucket owner has this permission by\n default. The bucket owner can grant this permission to others. For more information about\n permissions, see Permissions Related to Bucket Subresource Operations and Managing\n Access Permissions to Your Amazon S3 Resources.

\n

For more information about metrics configurations and CloudWatch request metrics, see\n Monitoring Metrics with Amazon CloudWatch.

\n

The following operations are related to\n ListBucketMetricsConfigurations:

\n ", "smithy.api#http": { "method": "GET", "uri": "/{Bucket}?metrics&x-id=ListBucketMetricsConfigurations", @@ -24019,7 +26081,7 @@ "ExpectedBucketOwner": { "target": "com.amazonaws.s3#AccountId", "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

", + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } } @@ -24037,7 +26099,7 @@ "target": "com.amazonaws.s3#ListBucketsOutput" }, "traits": { - "smithy.api#documentation": "

Returns a list of all buckets owned by the authenticated sender of the request. To use\n this operation, you must have the s3:ListAllMyBuckets permission.

\n

For information about Amazon S3 buckets, see Creating, configuring, and\n working with Amazon S3 buckets.

", + "smithy.api#documentation": "\n

This operation is not supported by directory buckets.

\n
\n

Returns a list of all buckets owned by the authenticated sender of the request. To use\n this operation, you must have the s3:ListAllMyBuckets permission.

\n

For information about Amazon S3 buckets, see Creating, configuring, and\n working with Amazon S3 buckets.

", "smithy.api#examples": [ { "title": "To list all buckets", @@ -24066,7 +26128,7 @@ ], "smithy.api#http": { "method": "GET", - "uri": "/", + "uri": "/?x-id=ListBuckets", "code": 200 } } @@ -24092,6 +26154,76 @@ "smithy.api#xmlName": "ListAllMyBucketsResult" } }, + "com.amazonaws.s3#ListDirectoryBuckets": { + "type": "operation", + "input": { + "target": "com.amazonaws.s3#ListDirectoryBucketsRequest" + }, + "output": { + "target": "com.amazonaws.s3#ListDirectoryBucketsOutput" + }, + "traits": { + "smithy.api#documentation": "

Returns a list of all Amazon S3 directory buckets owned by the authenticated sender of the request. For more information about directory buckets, see Directory buckets in the Amazon S3 User Guide.

\n \n

\n Directory buckets - For directory buckets, you must make requests for this API operation to the Regional endpoint. These endpoints support path-style requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name\n . Virtual-hosted-style requests aren't supported. \nFor more information, see Regional and Zonal endpoints in the\n Amazon S3 User Guide.

\n
\n
\n
Permissions
\n
\n

You must have the s3express:ListAllMyDirectoryBuckets permission in an IAM identity-based policy instead of a bucket policy. Cross-account access to this API operation isn't supported. This operation can only be performed by the Amazon Web Services account that owns the resource. For more information about directory bucket policies and permissions, see Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone in the Amazon S3 User Guide.

\n
\n
HTTP Host header syntax
\n
\n

\n Directory buckets - The HTTP Host header syntax is s3express-control.region.amazonaws.com.

\n
\n
", + "smithy.api#http": { + "method": "GET", + "uri": "/?x-id=ListDirectoryBuckets", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "ContinuationToken", + "outputToken": "ContinuationToken", + "items": "Buckets", + "pageSize": "MaxDirectoryBuckets" + }, + "smithy.rules#staticContextParams": { + "UseS3ExpressControlEndpoint": { + "value": true + } + } + } + }, + "com.amazonaws.s3#ListDirectoryBucketsOutput": { + "type": "structure", + "members": { + "Buckets": { + "target": "com.amazonaws.s3#Buckets", + "traits": { + "smithy.api#documentation": "

The list of buckets owned by the requester.

" + } + }, + "ContinuationToken": { + "target": "com.amazonaws.s3#DirectoryBucketToken", + "traits": { + "smithy.api#documentation": "

If ContinuationToken was sent with the request, it is included in the\n response. You can use the returned ContinuationToken for pagination of the list response.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.s3#ListDirectoryBucketsRequest": { + "type": "structure", + "members": { + "ContinuationToken": { + "target": "com.amazonaws.s3#DirectoryBucketToken", + "traits": { + "smithy.api#documentation": "

\n ContinuationToken indicates to Amazon S3 that the list is being continued on\n this bucket with a token. ContinuationToken is obfuscated and is not a real\n key. You can use this ContinuationToken for pagination of the list results.

", + "smithy.api#httpQuery": "continuation-token" + } + }, + "MaxDirectoryBuckets": { + "target": "com.amazonaws.s3#MaxDirectoryBuckets", + "traits": { + "smithy.api#documentation": "

The maximum number of buckets to be returned in the response. When the specified number is greater than the count of buckets that are owned by an Amazon Web Services account, all of the buckets are returned in the response.

", + "smithy.api#httpQuery": "max-directory-buckets" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, "com.amazonaws.s3#ListMultipartUploads": { "type": "operation", "input": { @@ -24101,7 +26233,7 @@ "target": "com.amazonaws.s3#ListMultipartUploadsOutput" }, "traits": { - "smithy.api#documentation": "

This action lists in-progress multipart uploads. An in-progress multipart upload is a\n multipart upload that has been initiated using the Initiate Multipart Upload request, but\n has not yet been completed or aborted.

\n

This action returns at most 1,000 multipart uploads in the response. 1,000 multipart\n uploads is the maximum number of uploads a response can include, which is also the default\n value. You can further limit the number of uploads in a response by specifying the\n max-uploads parameter in the response. If additional multipart uploads\n satisfy the list criteria, the response will contain an IsTruncated element\n with the value true. To list the additional multipart uploads, use the\n key-marker and upload-id-marker request parameters.

\n

In the response, the uploads are sorted by key. If your application has initiated more\n than one multipart upload using the same object key, then uploads in the response are first\n sorted by key. Additionally, uploads are sorted in ascending order within each key by the\n upload initiation time.

\n

For more information on multipart uploads, see Uploading Objects Using Multipart\n Upload.

\n

For information on permissions required to use the multipart upload API, see Multipart Upload\n and Permissions.

\n

The following operations are related to ListMultipartUploads:

\n ", + "smithy.api#documentation": "

This operation lists in-progress multipart uploads in a bucket. An in-progress multipart upload is a\n multipart upload that has been initiated by the CreateMultipartUpload request, but\n has not yet been completed or aborted.

\n \n

\n Directory buckets - \n If multipart uploads in a directory bucket are in progress, you can't delete the bucket until all the in-progress multipart uploads are aborted or completed.\n

\n
\n

The ListMultipartUploads operation returns a maximum of 1,000 multipart uploads in the response. The limit of 1,000 multipart\n uploads is also the default\n value. You can further limit the number of uploads in a response by specifying the\n max-uploads request parameter. If there are more than 1,000 multipart uploads that \n satisfy your ListMultipartUploads request, the response returns an IsTruncated element\n with the value of true, a NextKeyMarker element, and a NextUploadIdMarker element. \n To list the remaining multipart uploads, you need to make subsequent ListMultipartUploads requests. \n In these requests, include two query parameters: key-marker and upload-id-marker. \n Set the value of key-marker to the NextKeyMarker value from the previous response. \n Similarly, set the value of upload-id-marker to the NextUploadIdMarker value from the previous response.
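Illustrative only: a sketch of the key-marker/upload-id-marker pagination described in the previous paragraph, for a general purpose bucket. A configured *s3.Client is assumed; the loop stops when no NextKeyMarker is returned, which corresponds to IsTruncated being false.

```go
// Sketch: list every in-progress multipart upload by following the next markers.
package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func listInProgressUploads(ctx context.Context, client *s3.Client, bucket string) error {
	var keyMarker, uploadIDMarker *string
	for {
		out, err := client.ListMultipartUploads(ctx, &s3.ListMultipartUploadsInput{
			Bucket:         aws.String(bucket),
			KeyMarker:      keyMarker,
			UploadIdMarker: uploadIDMarker,
		})
		if err != nil {
			return err
		}
		for _, u := range out.Uploads {
			fmt.Println(aws.ToString(u.Key), aws.ToString(u.UploadId))
		}
		if out.NextKeyMarker == nil {
			return nil // list is not truncated; no more pages
		}
		keyMarker, uploadIDMarker = out.NextKeyMarker, out.NextUploadIdMarker
	}
}
```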

\n \n

\n Directory buckets - The upload-id-marker element and \n the NextUploadIdMarker element aren't supported by directory buckets. \n To list the additional multipart uploads, you only need to set the value of key-marker to the NextKeyMarker value from the previous response.

\n
\n

For more information about multipart uploads, see Uploading Objects Using Multipart\n Upload in the Amazon S3\n User Guide.

\n \n

\n Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name\n . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the\n Amazon S3 User Guide.

\n
\n
\n
Permissions
\n
\n
    \n
  • \n

    \n General purpose bucket permissions - For information about permissions required to use the multipart upload API, see Multipart Upload\n and Permissions in the Amazon S3\n User Guide.

    \n
  • \n
  • \n

    \n Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the \n CreateSession\n API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. \nThe Amazon Web Services CLI and SDKs create the session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see \n CreateSession\n .

    \n
  • \n
\n
\n
Sorting of multipart uploads in response
\n
\n
    \n
  • \n

    \n General purpose bucket - In the ListMultipartUploads response, the multipart uploads are sorted based on two criteria:

    \n
      \n
    • \n

      Key-based sorting - Multipart uploads are initially sorted in ascending order based on their object keys.

      \n
    • \n
    • \n

      Time-based sorting - For uploads that share the same object key, \n they are further sorted in ascending order based on the upload initiation time. Among uploads with the same key, the one that was initiated first will appear before the ones that were initiated later.

      \n
    • \n
    \n
  • \n
  • \n

    \n Directory bucket - In the ListMultipartUploads response, the multipart uploads aren't sorted lexicographically based on the object keys. \n \n

    \n
  • \n
\n
\n
HTTP Host header syntax
\n
\n

\n Directory buckets - The HTTP Host header syntax is \n Bucket_name.s3express-az_id.region.amazonaws.com.

\n
\n
\n

The following operations are related to ListMultipartUploads:

\n ", "smithy.api#examples": [ { "title": "To list in-progress multipart uploads on a bucket", @@ -24168,7 +26300,7 @@ "UploadIdMarker": { "target": "com.amazonaws.s3#UploadIdMarker", "traits": { - "smithy.api#documentation": "

Upload ID after which listing began.

" + "smithy.api#documentation": "

Upload ID after which listing began.

\n \n

This functionality is not supported for directory buckets.

\n
" } }, "NextKeyMarker": { @@ -24180,19 +26312,19 @@ "Prefix": { "target": "com.amazonaws.s3#Prefix", "traits": { - "smithy.api#documentation": "

When a prefix is provided in the request, this field contains the specified prefix. The\n result contains only keys starting with the specified prefix.

" + "smithy.api#documentation": "

When a prefix is provided in the request, this field contains the specified prefix. The\n result contains only keys starting with the specified prefix.

\n \n

\n Directory buckets - For directory buckets, only prefixes that end in a delimiter (/) are supported.

\n
" } }, "Delimiter": { "target": "com.amazonaws.s3#Delimiter", "traits": { - "smithy.api#documentation": "

Contains the delimiter you specified in the request. If you don't specify a delimiter in\n your request, this element is absent from the response.

" + "smithy.api#documentation": "

Contains the delimiter you specified in the request. If you don't specify a delimiter in\n your request, this element is absent from the response.

\n \n

\n Directory buckets - For directory buckets, / is the only supported delimiter.

\n
" } }, "NextUploadIdMarker": { "target": "com.amazonaws.s3#NextUploadIdMarker", "traits": { - "smithy.api#documentation": "

When a list is truncated, this element specifies the value that should be used for the\n upload-id-marker request parameter in a subsequent request.

" + "smithy.api#documentation": "

When a list is truncated, this element specifies the value that should be used for the\n upload-id-marker request parameter in a subsequent request.

\n \n

This functionality is not supported for directory buckets.

\n
" } }, "MaxUploads": { @@ -24218,7 +26350,7 @@ "CommonPrefixes": { "target": "com.amazonaws.s3#CommonPrefixList", "traits": { - "smithy.api#documentation": "

If you specify a delimiter in the request, then the result returns each distinct key\n prefix containing the delimiter in a CommonPrefixes element. The distinct key\n prefixes are returned in the Prefix child element.

", + "smithy.api#documentation": "

If you specify a delimiter in the request, then the result returns each distinct key\n prefix containing the delimiter in a CommonPrefixes element. The distinct key\n prefixes are returned in the Prefix child element.

\n \n

\n Directory buckets - For directory buckets, only prefixes that end in a delimiter (/) are supported.

\n
", "smithy.api#xmlFlattened": {} } }, @@ -24246,7 +26378,7 @@ "Bucket": { "target": "com.amazonaws.s3#BucketName", "traits": { - "smithy.api#documentation": "

The name of the bucket to which the multipart upload was initiated.

\n

When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.

\n

When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form \n AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide.

", + "smithy.api#documentation": "

The name of the bucket to which the multipart upload was initiated.

\n

\n Directory buckets - When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format \n Bucket_name.s3express-az_id.region.amazonaws.com. Path-style requests are not supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must follow the format \n bucket_base_name--az-id--x-s3 (for example, \n DOC-EXAMPLE-BUCKET--usw2-az2--x-s3). For information about bucket naming\n restrictions, see Directory bucket naming\n rules in the Amazon S3 User Guide.

\n

\n Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.

\n \n

Access points and Object Lambda access points are not supported by directory buckets.

\n
\n

\n S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form \n AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide.

", "smithy.api#httpLabel": {}, "smithy.api#required": {}, "smithy.rules#contextParam": { @@ -24257,7 +26389,7 @@ "Delimiter": { "target": "com.amazonaws.s3#Delimiter", "traits": { - "smithy.api#documentation": "

Character you use to group keys.

\n

All keys that contain the same string between the prefix, if specified, and the first\n occurrence of the delimiter after the prefix are grouped under a single result element,\n CommonPrefixes. If you don't specify the prefix parameter, then the\n substring starts at the beginning of the key. The keys that are grouped under\n CommonPrefixes result element are not returned elsewhere in the\n response.

", + "smithy.api#documentation": "

Character you use to group keys.

\n

All keys that contain the same string between the prefix, if specified, and the first\n occurrence of the delimiter after the prefix are grouped under a single result element,\n CommonPrefixes. If you don't specify the prefix parameter, then the\n substring starts at the beginning of the key. The keys that are grouped under\n CommonPrefixes result element are not returned elsewhere in the\n response.

\n \n

\n Directory buckets - For directory buckets, / is the only supported delimiter.

\n
", "smithy.api#httpQuery": "delimiter" } }, @@ -24270,7 +26402,7 @@ "KeyMarker": { "target": "com.amazonaws.s3#KeyMarker", "traits": { - "smithy.api#documentation": "

Together with upload-id-marker, this parameter specifies the multipart\n upload after which listing should begin.

\n

If upload-id-marker is not specified, only the keys lexicographically\n greater than the specified key-marker will be included in the list.

\n

If upload-id-marker is specified, any multipart uploads for a key equal to\n the key-marker might also be included, provided those multipart uploads have\n upload IDs lexicographically greater than the specified\n upload-id-marker.

", + "smithy.api#documentation": "

Specifies the multipart upload after which listing should begin.

\n \n
    \n
  • \n

    \n General purpose buckets - For general purpose buckets, key-marker \n is an object key. Together with upload-id-marker, this parameter specifies the multipart\n upload after which listing should begin.

    \n

    If upload-id-marker is not specified, only the keys lexicographically\n greater than the specified key-marker will be included in the list.

    \n

    If upload-id-marker is specified, any multipart uploads for a key equal to\n the key-marker might also be included, provided those multipart uploads have\n upload IDs lexicographically greater than the specified\n upload-id-marker.

    \n
  • \n
  • \n

    \n Directory buckets - For directory buckets, key-marker \n is obfuscated and isn't a real object key. \n The upload-id-marker parameter isn't supported by directory buckets. \n To list the additional multipart uploads, you only need to set the value of key-marker to the NextKeyMarker value from the previous response. \n

    \n

    In the ListMultipartUploads response, the multipart uploads aren't sorted lexicographically based on the object keys. \n \n

    \n
  • \n
\n
", "smithy.api#httpQuery": "key-marker" } }, @@ -24284,7 +26416,7 @@ "Prefix": { "target": "com.amazonaws.s3#Prefix", "traits": { - "smithy.api#documentation": "

Lists in-progress uploads only for those keys that begin with the specified prefix. You\n can use prefixes to separate a bucket into different grouping of keys. (You can think of\n using prefix to make groups in the same way that you'd use a folder in a file\n system.)

", + "smithy.api#documentation": "

Lists in-progress uploads only for those keys that begin with the specified prefix. You\n can use prefixes to separate a bucket into different grouping of keys. (You can think of\n using prefix to make groups in the same way that you'd use a folder in a file\n system.)

\n \n

\n Directory buckets - For directory buckets, only prefixes that end in a delimiter (/) are supported.

\n
", "smithy.api#httpQuery": "prefix", "smithy.rules#contextParam": { "name": "Prefix" @@ -24294,14 +26426,14 @@ "UploadIdMarker": { "target": "com.amazonaws.s3#UploadIdMarker", "traits": { - "smithy.api#documentation": "

Together with key-marker, specifies the multipart upload after which listing should\n begin. If key-marker is not specified, the upload-id-marker parameter is ignored.\n Otherwise, any multipart uploads for a key equal to the key-marker might be included in the\n list only if they have an upload ID lexicographically greater than the specified\n upload-id-marker.

", + "smithy.api#documentation": "

Together with key-marker, specifies the multipart upload after which listing should\n begin. If key-marker is not specified, the upload-id-marker parameter is ignored.\n Otherwise, any multipart uploads for a key equal to the key-marker might be included in the\n list only if they have an upload ID lexicographically greater than the specified\n upload-id-marker.

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpQuery": "upload-id-marker" } }, "ExpectedBucketOwner": { "target": "com.amazonaws.s3#AccountId", "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

", + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } }, @@ -24325,7 +26457,7 @@ "target": "com.amazonaws.s3#ListObjectVersionsOutput" }, "traits": { - "smithy.api#documentation": "

Returns metadata about all versions of the objects in a bucket. You can also use request\n parameters as selection criteria to return metadata about a subset of all the object\n versions.

\n \n

To use this operation, you must have permission to perform the\n s3:ListBucketVersions action. Be aware of the name difference.

\n
\n \n

A 200 OK response can contain valid or invalid XML. Make sure to design\n your application to parse the contents of the response and handle it\n appropriately.

\n
\n

To use this operation, you must have READ access to the bucket.

\n

The following operations are related to ListObjectVersions:

\n ", + "smithy.api#documentation": "\n

This operation is not supported by directory buckets.

\n
\n

Returns metadata about all versions of the objects in a bucket. You can also use request\n parameters as selection criteria to return metadata about a subset of all the object\n versions.
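Illustrative only: a minimal AWS SDK for Go v2 sketch of calling ListObjectVersions as described above, assuming a configured *s3.Client and the s3:ListBucketVersions permission. Bucket name is a placeholder.

```go
// Sketch: print every object version and delete marker in a versioned bucket.
package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func printVersions(ctx context.Context, client *s3.Client, bucket string) error {
	out, err := client.ListObjectVersions(ctx, &s3.ListObjectVersionsInput{
		Bucket: aws.String(bucket),
	})
	if err != nil {
		return err
	}
	for _, v := range out.Versions {
		fmt.Println("version:", aws.ToString(v.Key), aws.ToString(v.VersionId))
	}
	for _, d := range out.DeleteMarkers {
		fmt.Println("delete marker:", aws.ToString(d.Key), aws.ToString(d.VersionId))
	}
	return nil
}
```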

\n \n

To use this operation, you must have permission to perform the\n s3:ListBucketVersions action. Be aware of the name difference.

\n
\n \n

A 200 OK response can contain valid or invalid XML. Make sure to design\n your application to parse the contents of the response and handle it\n appropriately.

\n
\n

To use this operation, you must have READ access to the bucket.

\n

The following operations are related to ListObjectVersions:

\n ", "smithy.api#examples": [ { "title": "To list object versions", @@ -24532,7 +26664,7 @@ "ExpectedBucketOwner": { "target": "com.amazonaws.s3#AccountId", "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

", + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } }, @@ -24568,7 +26700,7 @@ } ], "traits": { - "smithy.api#documentation": "

Returns some or all (up to 1,000) of the objects in a bucket. You can use the request\n parameters as selection criteria to return a subset of the objects in a bucket. A 200 OK\n response can contain valid or invalid XML. Be sure to design your application to parse the\n contents of the response and handle it appropriately.

\n \n

This action has been revised. We recommend that you use the newer version, ListObjectsV2, when developing applications. For backward compatibility,\n Amazon S3 continues to support ListObjects.

\n
\n

The following operations are related to ListObjects:

\n ", + "smithy.api#documentation": "\n

This operation is not supported by directory buckets.

\n
\n

Returns some or all (up to 1,000) of the objects in a bucket. You can use the request\n parameters as selection criteria to return a subset of the objects in a bucket. A 200 OK\n response can contain valid or invalid XML. Be sure to design your application to parse the\n contents of the response and handle it appropriately.

\n \n

This action has been revised. We recommend that you use the newer version, ListObjectsV2, when developing applications. For backward compatibility,\n Amazon S3 continues to support ListObjects.

\n
\n

The following operations are related to ListObjects:

\n ", "smithy.api#http": { "method": "GET", "uri": "/{Bucket}", @@ -24659,7 +26791,7 @@ "Bucket": { "target": "com.amazonaws.s3#BucketName", "traits": { - "smithy.api#documentation": "

The name of the bucket containing the objects.

\n

When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.

\n

When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form \n AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide.

", + "smithy.api#documentation": "

The name of the bucket containing the objects.

\n

\n Directory buckets - When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format \n Bucket_name.s3express-az_id.region.amazonaws.com. Path-style requests are not supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must follow the format \n bucket_base_name--az-id--x-s3 (for example, \n DOC-EXAMPLE-BUCKET--usw2-az2--x-s3). For information about bucket naming\n restrictions, see Directory bucket naming\n rules in the Amazon S3 User Guide.

\n

\n Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.

\n \n

Access points and Object Lambda access points are not supported by directory buckets.

\n
\n

\n S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form \n AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide.

", "smithy.api#httpLabel": {}, "smithy.api#required": {}, "smithy.rules#contextParam": { @@ -24714,7 +26846,7 @@ "ExpectedBucketOwner": { "target": "com.amazonaws.s3#AccountId", "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

", + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } }, @@ -24744,7 +26876,7 @@ } ], "traits": { - "smithy.api#documentation": "

Returns some or all (up to 1,000) of the objects in a bucket with each request. You can\n use the request parameters as selection criteria to return a subset of the objects in a\n bucket. A 200 OK response can contain valid or invalid XML. Make sure to\n design your application to parse the contents of the response and handle it appropriately.\n Objects are returned sorted in an ascending order of the respective key names in the list.\n For more information about listing objects, see Listing object keys\n programmatically in the Amazon S3 User Guide.

\n

To use this operation, you must have READ access to the bucket.

\n

To use this action in an Identity and Access Management (IAM) policy, you must have permission to perform\n the s3:ListBucket action. The bucket owner has this permission by default and\n can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing\n Access Permissions to Your Amazon S3 Resources in the\n Amazon S3 User Guide.

\n \n

This section describes the latest revision of this action. We recommend that you use\n this revised API operation for application development. For backward compatibility, Amazon S3\n continues to support the prior version of this API operation, ListObjects.

\n
\n

To get a list of your buckets, see ListBuckets.

\n

The following operations are related to ListObjectsV2:

\n ", + "smithy.api#documentation": "

Returns some or all (up to 1,000) of the objects in a bucket with each request. You can\n use the request parameters as selection criteria to return a subset of the objects in a\n bucket. A 200 OK response can contain valid or invalid XML. Make sure to\n design your application to parse the contents of the response and handle it appropriately.\n \n For more information about listing objects, see Listing object keys\n programmatically in the Amazon S3 User Guide. To get a list of your buckets, see ListBuckets.
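Illustrative only: the SDK's generated ListObjectsV2 paginator handles the continuation-token exchange described here, so callers rarely manage ContinuationToken by hand. A configured *s3.Client is assumed.

```go
// Sketch: page through all keys in a bucket with the ListObjectsV2 paginator.
package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func listAllKeys(ctx context.Context, client *s3.Client, bucket string) error {
	p := s3.NewListObjectsV2Paginator(client, &s3.ListObjectsV2Input{
		Bucket: aws.String(bucket),
	})
	for p.HasMorePages() {
		page, err := p.NextPage(ctx)
		if err != nil {
			return err
		}
		for _, obj := range page.Contents {
			fmt.Println(aws.ToString(obj.Key))
		}
	}
	return nil
}
```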

\n \n

\n Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name\n . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the\n Amazon S3 User Guide.

\n
\n
\n
Permissions
\n
\n
    \n
  • \n

    \n General purpose bucket permissions - To use this operation, you must have READ access to the bucket. You must have permission to perform\n the s3:ListBucket action. The bucket owner has this permission by default and\n can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing\n Access Permissions to Your Amazon S3 Resources in the\n Amazon S3 User Guide.

    \n
  • \n
  • \n

    \n Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the \n CreateSession\n API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. \nThe Amazon Web Services CLI and SDKs create the session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see \n CreateSession\n .

    \n
  • \n
\n
\n
Sorting order of returned objects
\n
\n
    \n
  • \n

    \n General purpose bucket - For general purpose buckets, ListObjectsV2 returns objects in lexicographical order based on their key names.

    \n
  • \n
  • \n

    \n Directory bucket - For directory buckets, ListObjectsV2 does not return objects in lexicographical order.

    \n
  • \n
\n
\n
HTTP Host header syntax
\n
\n

\n Directory buckets - The HTTP Host header syntax is \n Bucket_name.s3express-az_id.region.amazonaws.com.

\n
\n
\n \n

This section describes the latest revision of this action. We recommend that you use\n this revised API operation for application development. For backward compatibility, Amazon S3\n continues to support the prior version of this API operation, ListObjects.

\n
\n

The following operations are related to ListObjectsV2:

\n ", "smithy.api#http": { "method": "GET", "uri": "/{Bucket}?list-type=2", @@ -24776,19 +26908,19 @@ "Name": { "target": "com.amazonaws.s3#BucketName", "traits": { - "smithy.api#documentation": "

The bucket name.

\n

When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.

\n

When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form \n AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide.

" + "smithy.api#documentation": "

The bucket name.

" } }, "Prefix": { "target": "com.amazonaws.s3#Prefix", "traits": { - "smithy.api#documentation": "

Keys that begin with the indicated prefix.

" + "smithy.api#documentation": "

Keys that begin with the indicated prefix.

\n \n

\n Directory buckets - For directory buckets, only prefixes that end in a delimiter (/) are supported.

\n
" } }, "Delimiter": { "target": "com.amazonaws.s3#Delimiter", "traits": { - "smithy.api#documentation": "

Causes keys that contain the same string between the prefix and the first\n occurrence of the delimiter to be rolled up into a single result element in the\n CommonPrefixes collection. These rolled-up keys are not returned elsewhere\n in the response. Each rolled-up result counts as only one return against the\n MaxKeys value.

" + "smithy.api#documentation": "

Causes keys that contain the same string between the prefix and the first\n occurrence of the delimiter to be rolled up into a single result element in the\n CommonPrefixes collection. These rolled-up keys are not returned elsewhere\n in the response. Each rolled-up result counts as only one return against the\n MaxKeys value.

\n \n

\n Directory buckets - For directory buckets, / is the only supported delimiter.

\n
" } }, "MaxKeys": { @@ -24800,7 +26932,7 @@ "CommonPrefixes": { "target": "com.amazonaws.s3#CommonPrefixList", "traits": { - "smithy.api#documentation": "

All of the keys (up to 1,000) rolled up into a common prefix count as a single return\n when calculating the number of returns.

\n

A response can contain CommonPrefixes only if you specify a\n delimiter.

\n

\n CommonPrefixes contains all (if there are any) keys between\n Prefix and the next occurrence of the string specified by a\n delimiter.

\n

\n CommonPrefixes lists keys that act like subdirectories in the directory\n specified by Prefix.

\n

For example, if the prefix is notes/ and the delimiter is a slash\n (/) as in notes/summer/july, the common prefix is\n notes/summer/. All of the keys that roll up into a common prefix count as a\n single return when calculating the number of returns.

", + "smithy.api#documentation": "

All of the keys (up to 1,000) that share the same prefix are grouped together. When counting the total number of returns by this API operation, \n this group of keys is considered as one item.

\n

A response can contain CommonPrefixes only if you specify a\n delimiter.

\n

\n CommonPrefixes contains all (if there are any) keys between\n Prefix and the next occurrence of the string specified by a\n delimiter.

\n

\n CommonPrefixes lists keys that act like subdirectories in the directory\n specified by Prefix.

\n

For example, if the prefix is notes/ and the delimiter is a slash\n (/) as in notes/summer/july, the common prefix is\n notes/summer/. All of the keys that roll up into a common prefix count as a\n single return when calculating the number of returns.
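Illustrative only: a sketch of the notes/ example above, listing with a Prefix and a / delimiter so that rolled-up key prefixes come back in CommonPrefixes. The bucket name and a configured *s3.Client are assumed.

```go
// Sketch: group keys under notes/ by their next "/"-delimited path segment.
package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func listCommonPrefixes(ctx context.Context, client *s3.Client, bucket string) error {
	out, err := client.ListObjectsV2(ctx, &s3.ListObjectsV2Input{
		Bucket:    aws.String(bucket),
		Prefix:    aws.String("notes/"),
		Delimiter: aws.String("/"),
	})
	if err != nil {
		return err
	}
	for _, cp := range out.CommonPrefixes {
		fmt.Println("common prefix:", aws.ToString(cp.Prefix)) // e.g. notes/summer/
	}
	for _, obj := range out.Contents {
		fmt.Println("key:", aws.ToString(obj.Key)) // keys directly under notes/
	}
	return nil
}
```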

\n \n
    \n
  • \n

    \n Directory buckets - For directory buckets, only prefixes that end in a delimiter (/) are supported.

    \n
  • \n
  • \n

    \n Directory buckets - When you query ListObjectsV2 with a delimiter during in-progress multipart uploads, the \n CommonPrefixes response parameter contains the prefixes that are associated with the in-progress multipart uploads. \n For more information about multipart uploads, see Multipart Upload Overview in the Amazon S3 User Guide.

    \n
  • \n
\n
", "smithy.api#xmlFlattened": {} } }, @@ -24819,7 +26951,7 @@ "ContinuationToken": { "target": "com.amazonaws.s3#Token", "traits": { - "smithy.api#documentation": "

If ContinuationToken was sent with the request, it is included in the\n response.

" + "smithy.api#documentation": "

If ContinuationToken was sent with the request, it is included in the\n response. You can use the returned ContinuationToken for pagination of the list results.

" } }, "NextContinuationToken": { @@ -24831,7 +26963,7 @@ "StartAfter": { "target": "com.amazonaws.s3#StartAfter", "traits": { - "smithy.api#documentation": "

If StartAfter was sent with the request, it is included in the response.

" + "smithy.api#documentation": "

If StartAfter was sent with the request, it is included in the response.

\n \n

This functionality is not supported for directory buckets.

\n
" } }, "RequestCharged": { @@ -24852,7 +26984,7 @@ "Bucket": { "target": "com.amazonaws.s3#BucketName", "traits": { - "smithy.api#documentation": "

Bucket name to list.

\n

When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.

\n

When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form \n AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide.

", + "smithy.api#documentation": "

\n Directory buckets - When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format \n Bucket_name.s3express-az_id.region.amazonaws.com. Path-style requests are not supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must follow the format \n bucket_base_name--az-id--x-s3 (for example, \n DOC-EXAMPLE-BUCKET--usw2-az2--x-s3). For information about bucket naming\n restrictions, see Directory bucket naming\n rules in the Amazon S3 User Guide.

\n

\n Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.

\n \n

Access points and Object Lambda access points are not supported by directory buckets.

\n
\n

\n S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form \n AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide.

", "smithy.api#httpLabel": {}, "smithy.api#required": {}, "smithy.rules#contextParam": { @@ -24863,7 +26995,7 @@ "Delimiter": { "target": "com.amazonaws.s3#Delimiter", "traits": { - "smithy.api#documentation": "

A delimiter is a character that you use to group keys.

", + "smithy.api#documentation": "

A delimiter is a character that you use to group keys.

\n \n
    \n
  • \n

    \n Directory buckets - For directory buckets, / is the only supported delimiter.

    \n
  • \n
  • \n

    \n Directory buckets - When you query ListObjectsV2 with a delimiter during in-progress multipart uploads, the \n CommonPrefixes response parameter contains the prefixes that are associated with the in-progress multipart uploads. \n For more information about multipart uploads, see Multipart Upload Overview in the Amazon S3 User Guide.

    \n
  • \n
\n
", "smithy.api#httpQuery": "delimiter" } }, @@ -24884,7 +27016,7 @@ "Prefix": { "target": "com.amazonaws.s3#Prefix", "traits": { - "smithy.api#documentation": "

Limits the response to keys that begin with the specified prefix.

", + "smithy.api#documentation": "

Limits the response to keys that begin with the specified prefix.

\n \n

\n Directory buckets - For directory buckets, only prefixes that end in a delimiter (/) are supported.

\n
", "smithy.api#httpQuery": "prefix", "smithy.rules#contextParam": { "name": "Prefix" @@ -24894,42 +27026,42 @@ "ContinuationToken": { "target": "com.amazonaws.s3#Token", "traits": { - "smithy.api#documentation": "

\n ContinuationToken indicates to Amazon S3 that the list is being continued on\n this bucket with a token. ContinuationToken is obfuscated and is not a real\n key.

", + "smithy.api#documentation": "

\n ContinuationToken indicates to Amazon S3 that the list is being continued on\n this bucket with a token. ContinuationToken is obfuscated and is not a real\n key. You can use this ContinuationToken for pagination of the list results.

", "smithy.api#httpQuery": "continuation-token" } }, "FetchOwner": { "target": "com.amazonaws.s3#FetchOwner", "traits": { - "smithy.api#documentation": "

The owner field is not present in ListObjectsV2 by default. If you want to\n return the owner field with each key in the result, then set the FetchOwner\n field to true.

", + "smithy.api#documentation": "

The owner field is not present in ListObjectsV2 by default. If you want to\n return the owner field with each key in the result, then set the FetchOwner\n field to true.

\n \n

\n Directory buckets - For directory buckets, the bucket owner is returned as the object owner for all objects.

\n
", "smithy.api#httpQuery": "fetch-owner" } }, "StartAfter": { "target": "com.amazonaws.s3#StartAfter", "traits": { - "smithy.api#documentation": "

StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts listing after this\n specified key. StartAfter can be any key in the bucket.

", + "smithy.api#documentation": "

StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts listing after this\n specified key. StartAfter can be any key in the bucket.

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpQuery": "start-after" } }, "RequestPayer": { "target": "com.amazonaws.s3#RequestPayer", "traits": { - "smithy.api#documentation": "

Confirms that the requester knows that she or he will be charged for the list objects\n request in V2 style. Bucket owners need not specify this parameter in their\n requests.

", + "smithy.api#documentation": "

Confirms that the requester knows that she or he will be charged for the list objects\n request in V2 style. Bucket owners need not specify this parameter in their\n requests.

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-request-payer" } }, "ExpectedBucketOwner": { "target": "com.amazonaws.s3#AccountId", "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

", + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } }, "OptionalObjectAttributes": { "target": "com.amazonaws.s3#OptionalObjectAttributesList", "traits": { - "smithy.api#documentation": "

Specifies the optional fields that you want returned in the response. Fields that you do\n not specify are not returned.

", + "smithy.api#documentation": "

Specifies the optional fields that you want returned in the response. Fields that you do\n not specify are not returned.

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-optional-object-attributes" } } @@ -24947,7 +27079,7 @@ "target": "com.amazonaws.s3#ListPartsOutput" }, "traits": { - "smithy.api#documentation": "

Lists the parts that have been uploaded for a specific multipart upload. This operation\n must include the upload ID, which you obtain by sending the initiate multipart upload\n request (see CreateMultipartUpload).\n This request returns a maximum of 1,000 uploaded parts. The default number of parts\n returned is 1,000 parts. You can restrict the number of parts returned by specifying the\n max-parts request parameter. If your multipart upload consists of more than\n 1,000 parts, the response returns an IsTruncated field with the value of true,\n and a NextPartNumberMarker element. In subsequent ListParts\n requests you can include the part-number-marker query string parameter and set its value to\n the NextPartNumberMarker field value from the previous response.

\n

If the upload was created using a checksum algorithm, you will need to have permission\n to the kms:Decrypt action for the request to succeed.

\n

For more information on multipart uploads, see Uploading Objects Using Multipart\n Upload.

\n

For information on permissions required to use the multipart upload API, see Multipart Upload\n and Permissions.

\n

The following operations are related to ListParts:

\n ", + "smithy.api#documentation": "

Lists the parts that have been uploaded for a specific multipart upload.

\n

To use this operation, you must provide the upload ID in the request. You obtain this upload ID by sending the initiate multipart upload\n request through CreateMultipartUpload.

\n

The ListParts request returns a maximum of 1,000 uploaded parts. The limit of 1,000 parts is also the default value. You can restrict the number of parts in a response by specifying the\n max-parts request parameter. If your multipart upload consists of more than\n 1,000 parts, the response returns an IsTruncated field with the value of true,\n and a NextPartNumberMarker element. To list remaining uploaded parts, in subsequent ListParts\n requests, include the part-number-marker query string parameter and set its value to\n the NextPartNumberMarker field value from the previous response.
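Illustrative only: the generated ListParts paginator follows NextPartNumberMarker / part-number-marker as described above, so the loop below does not manage the marker directly. A configured *s3.Client and an in-progress upload ID are assumed.

```go
// Sketch: list the parts uploaded so far for an in-progress multipart upload.
package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func listUploadedParts(ctx context.Context, client *s3.Client, bucket, key, uploadID string) error {
	p := s3.NewListPartsPaginator(client, &s3.ListPartsInput{
		Bucket:   aws.String(bucket),
		Key:      aws.String(key),
		UploadId: aws.String(uploadID),
	})
	for p.HasMorePages() {
		page, err := p.NextPage(ctx)
		if err != nil {
			return err
		}
		for _, part := range page.Parts {
			fmt.Println("part ETag:", aws.ToString(part.ETag))
		}
	}
	return nil
}
```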

\n

For more information on multipart uploads, see Uploading Objects Using Multipart\n Upload in the Amazon S3\n User Guide.

\n \n

\n Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name\n . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the\n Amazon S3 User Guide.

\n
\n
\n
Permissions
\n
\n
    \n
  • \n

    \n General purpose bucket permissions - For information about permissions required to use the multipart upload API, see Multipart Upload\n and Permissions in the Amazon S3\n User Guide.

    \n

    If the upload was created using server-side encryption with Key Management Service (KMS) keys\n (SSE-KMS) or dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), you must have permission\n to the kms:Decrypt action for the ListParts request to succeed.

    \n
  • \n
  • \n

    \n Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the \n CreateSession\n API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. \nThe Amazon Web Services CLI and SDKs create the session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see \n CreateSession\n .

    \n
  • \n
\n
\n
HTTP Host header syntax
\n
\n

\n Directory buckets - The HTTP Host header syntax is \n Bucket_name.s3express-az_id.region.amazonaws.com.

\n
\n
\n

The following operations are related to ListParts:

\n ", "smithy.api#http": { "method": "GET", "uri": "/{Bucket}/{Key+}?x-id=ListParts", @@ -24967,14 +27099,14 @@ "AbortDate": { "target": "com.amazonaws.s3#AbortDate", "traits": { - "smithy.api#documentation": "

If the bucket has a lifecycle rule configured with an action to abort incomplete\n multipart uploads and the prefix in the lifecycle rule matches the object name in the\n request, then the response includes this header indicating when the initiated multipart\n upload will become eligible for abort operation. For more information, see Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle\n Configuration.

\n

The response will also include the x-amz-abort-rule-id header that will\n provide the ID of the lifecycle configuration rule that defines this action.

", + "smithy.api#documentation": "

If the bucket has a lifecycle rule configured with an action to abort incomplete\n multipart uploads and the prefix in the lifecycle rule matches the object name in the\n request, then the response includes this header indicating when the initiated multipart\n upload will become eligible for abort operation. For more information, see Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle\n Configuration.

\n

The response will also include the x-amz-abort-rule-id header that will\n provide the ID of the lifecycle configuration rule that defines this action.

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-abort-date" } }, "AbortRuleId": { "target": "com.amazonaws.s3#AbortRuleId", "traits": { - "smithy.api#documentation": "

This header is returned along with the x-amz-abort-date header. It\n identifies applicable lifecycle configuration rule that defines the action to abort\n incomplete multipart uploads.

", + "smithy.api#documentation": "

This header is returned along with the x-amz-abort-date header. It\n identifies the applicable lifecycle configuration rule that defines the action to abort\n incomplete multipart uploads.

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-abort-rule-id" } }, @@ -25023,7 +27155,7 @@ "Parts": { "target": "com.amazonaws.s3#Parts", "traits": { - "smithy.api#documentation": "

Container for elements related to a particular part. A response can contain zero or\n more Part elements.

", + "smithy.api#documentation": "

Container for elements related to a particular part. A response can contain zero or\n more Part elements.

", "smithy.api#xmlFlattened": {}, "smithy.api#xmlName": "Part" } @@ -25037,13 +27169,13 @@ "Owner": { "target": "com.amazonaws.s3#Owner", "traits": { - "smithy.api#documentation": "

Container element that identifies the object owner, after the object is created. If\n multipart upload is initiated by an IAM user, this element provides the parent account ID\n and display name.

" + "smithy.api#documentation": "

Container element that identifies the object owner, after the object is created. If\n the multipart upload is initiated by an IAM user, this element provides the parent account ID\n and display name.

\n \n

\n Directory buckets - The bucket owner is returned as the object owner for all the parts.

\n
" } }, "StorageClass": { "target": "com.amazonaws.s3#StorageClass", "traits": { - "smithy.api#documentation": "

Class of storage (STANDARD or REDUCED_REDUNDANCY) used to store the uploaded\n object.

" + "smithy.api#documentation": "

The class of storage used to store the uploaded\n object.

\n \n

\n Directory buckets - Only the S3 Express One Zone storage class is supported by directory buckets to store objects.

\n
" } }, "RequestCharged": { @@ -25070,7 +27202,7 @@ "Bucket": { "target": "com.amazonaws.s3#BucketName", "traits": { - "smithy.api#documentation": "

The name of the bucket to which the parts are being uploaded.

\n

When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.

\n

When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form \n AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide.

", + "smithy.api#documentation": "

The name of the bucket to which the parts are being uploaded.

\n

\n Directory buckets - When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format \n Bucket_name.s3express-az_id.region.amazonaws.com. Path-style requests are not supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must follow the format \n bucket_base_name--az-id--x-s3 (for example, \n DOC-EXAMPLE-BUCKET--usw2-az2--x-s3). For information about bucket naming\n restrictions, see Directory bucket naming\n rules in the Amazon S3 User Guide.

\n

\n Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.

\n \n

Access points and Object Lambda access points are not supported by directory buckets.

\n
\n

\n S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form \n AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide.

", "smithy.api#httpLabel": {}, "smithy.api#required": {}, "smithy.rules#contextParam": { @@ -25120,28 +27252,28 @@ "ExpectedBucketOwner": { "target": "com.amazonaws.s3#AccountId", "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

", + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } }, "SSECustomerAlgorithm": { "target": "com.amazonaws.s3#SSECustomerAlgorithm", "traits": { - "smithy.api#documentation": "

The server-side encryption (SSE) algorithm used to encrypt the object. This parameter is needed only when the object was created \n using a checksum algorithm. For more information,\n see Protecting data using SSE-C keys in the\n Amazon S3 User Guide.

", + "smithy.api#documentation": "

The server-side encryption (SSE) algorithm used to encrypt the object. This parameter is needed only when the object was created \n using a checksum algorithm. For more information,\n see Protecting data using SSE-C keys in the\n Amazon S3 User Guide.

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-server-side-encryption-customer-algorithm" } }, "SSECustomerKey": { "target": "com.amazonaws.s3#SSECustomerKey", "traits": { - "smithy.api#documentation": "

The server-side encryption (SSE) customer managed key. This parameter is needed only when the object was created using a checksum algorithm. \n For more information, see\n Protecting data using SSE-C keys in the\n Amazon S3 User Guide.

", + "smithy.api#documentation": "

The server-side encryption (SSE) customer managed key. This parameter is needed only when the object was created using a checksum algorithm. \n For more information, see\n Protecting data using SSE-C keys in the\n Amazon S3 User Guide.

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-server-side-encryption-customer-key" } }, "SSECustomerKeyMD5": { "target": "com.amazonaws.s3#SSECustomerKeyMD5", "traits": { - "smithy.api#documentation": "

The MD5 server-side encryption (SSE) customer managed key. This parameter is needed only when the object was created using a checksum \n algorithm. For more information,\n see Protecting data using SSE-C keys in the\n Amazon S3 User Guide.

", + "smithy.api#documentation": "

The MD5 digest of the server-side encryption (SSE) customer managed key. This parameter is needed only when the object was created using a checksum \n algorithm. For more information,\n see Protecting data using SSE-C keys in the\n Amazon S3 User Guide.

\n \n

This functionality is not supported for directory buckets.

\n
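A sketch of supplying the three SSE-C members from the Go client when reading back an object that was written with a customer-provided key. HeadObject is used here as one operation that accepts them; the bucket name, key name, and raw key bytes are placeholders.

package example

import (
	"context"
	"crypto/md5"
	"encoding/base64"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// headWithSSEC sends the customer-provided 256-bit key, its algorithm, and the
// MD5 digest of the key; the key and digest are base64-encoded on the wire.
func headWithSSEC(ctx context.Context, client *s3.Client, rawKey []byte) error {
	digest := md5.Sum(rawKey)
	_, err := client.HeadObject(ctx, &s3.HeadObjectInput{
		Bucket:               aws.String("amzn-s3-demo-bucket"),
		Key:                  aws.String("my-object"),
		SSECustomerAlgorithm: aws.String("AES256"),
		SSECustomerKey:       aws.String(base64.StdEncoding.EncodeToString(rawKey)),
		SSECustomerKeyMD5:    aws.String(base64.StdEncoding.EncodeToString(digest[:])),
	})
	return err
}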
", "smithy.api#httpHeader": "x-amz-server-side-encryption-customer-key-MD5" } } @@ -25153,9 +27285,43 @@ "com.amazonaws.s3#Location": { "type": "string" }, + "com.amazonaws.s3#LocationInfo": { + "type": "structure", + "members": { + "Type": { + "target": "com.amazonaws.s3#LocationType", + "traits": { + "smithy.api#documentation": "

The type of location where the bucket will be created.

" + } + }, + "Name": { + "target": "com.amazonaws.s3#LocationNameAsString", + "traits": { + "smithy.api#documentation": "

The name of the location where the bucket will be created.

\n

For directory buckets, the AZ ID of the Availability Zone where the bucket will be created. An example AZ ID value is usw2-az2.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Specifies the location where the bucket will be created.

\n

For directory buckets, the location type is Availability Zone. For more information about directory buckets, see \n Directory buckets in the Amazon S3 User Guide.

\n \n

This functionality is only supported by directory buckets.

\n
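Assuming the generated Go surface follows the SDK's usual naming (types.LocationInfo, types.LocationTypeAvailabilityZone, and a Location member on CreateBucketConfiguration), a directory-bucket creation call might look like the sketch below. The bucket name and AZ ID are placeholders, and CreateBucket for directory buckets has additional requirements covered in its own documentation.

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// createDirectoryBucket pins the new bucket to a single Availability Zone by AZ ID.
// Directory bucket names embed the same AZ ID and end in --x-s3.
func createDirectoryBucket(ctx context.Context, client *s3.Client) error {
	_, err := client.CreateBucket(ctx, &s3.CreateBucketInput{
		Bucket: aws.String("my-data--usw2-az2--x-s3"),
		CreateBucketConfiguration: &types.CreateBucketConfiguration{
			Location: &types.LocationInfo{
				Type: types.LocationTypeAvailabilityZone, // the only supported location type
				Name: aws.String("usw2-az2"),             // AZ ID, not the AZ name
			},
		},
	})
	return err
}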
" + } + }, + "com.amazonaws.s3#LocationNameAsString": { + "type": "string" + }, "com.amazonaws.s3#LocationPrefix": { "type": "string" }, + "com.amazonaws.s3#LocationType": { + "type": "enum", + "members": { + "AvailabilityZone": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AvailabilityZone" + } + } + } + }, "com.amazonaws.s3#LoggingEnabled": { "type": "structure", "members": { @@ -25233,6 +27399,15 @@ "com.amazonaws.s3#MaxAgeSeconds": { "type": "integer" }, + "com.amazonaws.s3#MaxDirectoryBuckets": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 0, + "max": 1000 + } + } + }, "com.amazonaws.s3#MaxKeys": { "type": "integer" }, @@ -25455,13 +27630,13 @@ "StorageClass": { "target": "com.amazonaws.s3#StorageClass", "traits": { - "smithy.api#documentation": "

The class of storage used to store the object.

" + "smithy.api#documentation": "

The class of storage used to store the object.

\n \n

\n Directory buckets - Only the S3 Express One Zone storage class is supported by directory buckets to store objects.

\n
" } }, "Owner": { "target": "com.amazonaws.s3#Owner", "traits": { - "smithy.api#documentation": "

Specifies the owner of the object that is part of the multipart upload.

" + "smithy.api#documentation": "

Specifies the owner of the object that is part of the multipart upload.

\n \n

\n Directory buckets - The bucket owner is returned as the object owner for all the objects.

\n
" } }, "Initiator": { @@ -25513,7 +27688,8 @@ "members": {}, "traits": { "smithy.api#documentation": "

The specified bucket does not exist.

", - "smithy.api#error": "client" + "smithy.api#error": "client", + "smithy.api#httpError": 404 } }, "com.amazonaws.s3#NoSuchKey": { @@ -25521,7 +27697,8 @@ "members": {}, "traits": { "smithy.api#documentation": "

The specified key does not exist.

", - "smithy.api#error": "client" + "smithy.api#error": "client", + "smithy.api#httpError": 404 } }, "com.amazonaws.s3#NoSuchUpload": { @@ -25529,7 +27706,8 @@ "members": {}, "traits": { "smithy.api#documentation": "

The specified multipart upload does not exist.

", - "smithy.api#error": "client" + "smithy.api#error": "client", + "smithy.api#httpError": 404 } }, "com.amazonaws.s3#NoncurrentVersionExpiration": { @@ -25668,7 +27846,7 @@ "ETag": { "target": "com.amazonaws.s3#ETag", "traits": { - "smithy.api#documentation": "

The entity tag is a hash of the object. The ETag reflects changes only to the contents\n of an object, not its metadata. The ETag may or may not be an MD5 digest of the object\n data. Whether or not it is depends on how the object was created and how it is encrypted as\n described below:

\n
    \n
  • \n

    Objects created by the PUT Object, POST Object, or Copy operation, or through the\n Amazon Web Services Management Console, and are encrypted by SSE-S3 or plaintext, have ETags that\n are an MD5 digest of their object data.

    \n
  • \n
  • \n

    Objects created by the PUT Object, POST Object, or Copy operation, or through the\n Amazon Web Services Management Console, and are encrypted by SSE-C or SSE-KMS, have ETags that are\n not an MD5 digest of their object data.

    \n
  • \n
  • \n

    If an object is created by either the Multipart Upload or Part Copy operation, the\n ETag is not an MD5 digest, regardless of the method of encryption. If an object is\n larger than 16 MB, the Amazon Web Services Management Console will upload or copy that object as a\n Multipart Upload, and therefore the ETag will not be an MD5 digest.

    \n
  • \n
" + "smithy.api#documentation": "

The entity tag is a hash of the object. The ETag reflects changes only to the contents\n of an object, not its metadata. The ETag may or may not be an MD5 digest of the object\n data. Whether or not it is depends on how the object was created and how it is encrypted as\n described below:

\n
    \n
  • \n

    Objects created by the PUT Object, POST Object, or Copy operation, or through the\n Amazon Web Services Management Console, and are encrypted by SSE-S3 or plaintext, have ETags that\n are an MD5 digest of their object data.

    \n
  • \n
  • \n

    Objects created by the PUT Object, POST Object, or Copy operation, or through the\n Amazon Web Services Management Console, and are encrypted by SSE-C or SSE-KMS, have ETags that are\n not an MD5 digest of their object data.

    \n
  • \n
  • \n

    If an object is created by either the Multipart Upload or Part Copy operation, the\n ETag is not an MD5 digest, regardless of the method of encryption. If an object is\n larger than 16 MB, the Amazon Web Services Management Console will upload or copy that object as a\n Multipart Upload, and therefore the ETag will not be an MD5 digest.

    \n
  • \n
\n \n

\n Directory buckets - MD5 is not supported by directory buckets.

\n
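Because the ETag is not a reliable MD5 of the object (multipart uploads, SSE-C/SSE-KMS, and directory buckets all break that assumption), a common pattern is to attach one of the supported checksum algorithms at upload time and verify that value instead. A minimal Go sketch, with placeholder bucket and key names:

package example

import (
	"context"
	"os"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// putWithChecksum has the SDK compute a SHA-256 checksum for the upload; S3
// validates it and stores it with the object for later retrieval.
func putWithChecksum(ctx context.Context, client *s3.Client, path string) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()

	_, err = client.PutObject(ctx, &s3.PutObjectInput{
		Bucket:            aws.String("amzn-s3-demo-bucket"),
		Key:               aws.String(path),
		Body:              f,
		ChecksumAlgorithm: types.ChecksumAlgorithmSha256,
	})
	return err
}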
" } }, "ChecksumAlgorithm": { @@ -25687,19 +27865,19 @@ "StorageClass": { "target": "com.amazonaws.s3#ObjectStorageClass", "traits": { - "smithy.api#documentation": "

The class of storage used to store the object.

" + "smithy.api#documentation": "

The class of storage used to store the object.

\n \n

\n Directory buckets - Only the S3 Express One Zone storage class is supported by directory buckets to store objects.

\n
" } }, "Owner": { "target": "com.amazonaws.s3#Owner", "traits": { - "smithy.api#documentation": "

The owner of the object

" + "smithy.api#documentation": "

The owner of the object.

\n \n

\n Directory buckets - The bucket owner is returned as the object owner.

\n
" } }, "RestoreStatus": { "target": "com.amazonaws.s3#RestoreStatus", "traits": { - "smithy.api#documentation": "

Specifies the restoration status of an object. Objects in certain storage classes must\n be restored before they can be retrieved. For more information about these storage classes\n and how to work with archived objects, see Working with archived\n objects in the Amazon S3 User Guide.

" + "smithy.api#documentation": "

Specifies the restoration status of an object. Objects in certain storage classes must\n be restored before they can be retrieved. For more information about these storage classes\n and how to work with archived objects, see Working with archived\n objects in the Amazon S3 User Guide.

\n \n

This functionality is not supported for directory buckets. Only the S3 Express One Zone storage class is supported by directory buckets to store objects.

\n
" } } }, @@ -25712,7 +27890,8 @@ "members": {}, "traits": { "smithy.api#documentation": "

This action is not allowed against this storage tier.

", - "smithy.api#error": "client" + "smithy.api#error": "client", + "smithy.api#httpError": 403 } }, "com.amazonaws.s3#ObjectAttributes": { @@ -25816,7 +27995,7 @@ "VersionId": { "target": "com.amazonaws.s3#ObjectVersionId", "traits": { - "smithy.api#documentation": "

VersionId for the specific version of the object to delete.

" + "smithy.api#documentation": "

Version ID for the specific version of the object to delete.

\n \n

This functionality is not supported for directory buckets.

\n
" } } }, @@ -25991,7 +28170,8 @@ "members": {}, "traits": { "smithy.api#documentation": "

The source object of the COPY action is not in the active tier and is only stored in\n Amazon S3 Glacier.

", - "smithy.api#error": "client" + "smithy.api#error": "client", + "smithy.api#httpError": 403 } }, "com.amazonaws.s3#ObjectOwnership": { @@ -26017,7 +28197,7 @@ } }, "traits": { - "smithy.api#documentation": "

The container element for object ownership for a bucket's ownership controls.

\n

BucketOwnerPreferred - Objects uploaded to the bucket change ownership to the bucket\n owner if the objects are uploaded with the bucket-owner-full-control canned\n ACL.

\n

ObjectWriter - The uploading account will own the object if the object is uploaded with\n the bucket-owner-full-control canned ACL.

\n

BucketOwnerEnforced - Access control lists (ACLs) are disabled and no longer affect\n permissions. The bucket owner automatically owns and has full control over every object in\n the bucket. The bucket only accepts PUT requests that don't specify an ACL or bucket owner\n full control ACLs, such as the bucket-owner-full-control canned ACL or an\n equivalent form of this ACL expressed in the XML format.

" + "smithy.api#documentation": "

The container element for object ownership for a bucket's ownership controls.

\n

\n BucketOwnerPreferred - Objects uploaded to the bucket change ownership to the bucket\n owner if the objects are uploaded with the bucket-owner-full-control canned\n ACL.

\n

\n ObjectWriter - The uploading account will own the object if the object is uploaded with\n the bucket-owner-full-control canned ACL.

\n

\n BucketOwnerEnforced - Access control lists (ACLs) are disabled and no longer affect\n permissions. The bucket owner automatically owns and has full control over every object in\n the bucket. The bucket only accepts PUT requests that don't specify an ACL or specify bucket owner\n full control ACLs (such as the predefined bucket-owner-full-control canned ACL or a custom ACL \n in XML format that grants the same permissions).

\n

By default, ObjectOwnership is set to BucketOwnerEnforced and ACLs are disabled. We recommend\n keeping ACLs disabled, except in uncommon use cases where you must control access for each object individually. For more information about S3 Object Ownership, see\n Controlling ownership of objects and disabling ACLs for your bucket in the Amazon S3 User Guide.\n

\n \n

This functionality is not supported for directory buckets. Directory buckets use the bucket owner enforced setting for S3 Object Ownership.

\n
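For general purpose buckets, the ownership setting can be chosen at creation time. A sketch with the generated Go client; the bucket name is a placeholder.

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// createBucketWithOwnership keeps ACLs disabled (the default and recommended
// setting): the bucket owner owns every object and access is managed with policies.
func createBucketWithOwnership(ctx context.Context, client *s3.Client) error {
	_, err := client.CreateBucket(ctx, &s3.CreateBucketInput{
		Bucket:          aws.String("amzn-s3-demo-bucket"),
		ObjectOwnership: types.ObjectOwnershipBucketOwnerEnforced,
	})
	return err
}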
" } }, "com.amazonaws.s3#ObjectPart": { @@ -26044,19 +28224,19 @@ "ChecksumCRC32C": { "target": "com.amazonaws.s3#ChecksumCRC32C", "traits": { - "smithy.api#documentation": "

The base64-encoded, 32-bit CRC32C checksum of the object. This will only be present if it was uploaded\n with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

" + "smithy.api#documentation": "

The base64-encoded, 32-bit CRC32C checksum of the object. This will only be present if it was uploaded\n with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

" } }, "ChecksumSHA1": { "target": "com.amazonaws.s3#ChecksumSHA1", "traits": { - "smithy.api#documentation": "

The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded\n with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

" + "smithy.api#documentation": "

The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded\n with the object. When you use the API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

" } }, "ChecksumSHA256": { "target": "com.amazonaws.s3#ChecksumSHA256", "traits": { - "smithy.api#documentation": "

The base64-encoded, 256-bit SHA-256 digest of the object. This will only be present if it was uploaded\n with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

" + "smithy.api#documentation": "

The base64-encoded, 256-bit SHA-256 digest of the object. This will only be present if it was uploaded\n with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.
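To read these whole-object and part-level checksum values back without fetching the object body, GetObjectAttributes can be used from the Go client. A sketch with placeholder names:

package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// inspectChecksums requests only the attributes it needs; ObjectParts is populated
// for multipart uploads and carries the per-part checksum values described above.
func inspectChecksums(ctx context.Context, client *s3.Client) error {
	out, err := client.GetObjectAttributes(ctx, &s3.GetObjectAttributesInput{
		Bucket: aws.String("amzn-s3-demo-bucket"),
		Key:    aws.String("large-upload.bin"),
		ObjectAttributes: []types.ObjectAttributes{
			types.ObjectAttributesEtag,
			types.ObjectAttributesChecksum,
			types.ObjectAttributesObjectParts,
		},
	})
	if err != nil {
		return err
	}
	if out.Checksum != nil && out.Checksum.ChecksumSHA256 != nil {
		fmt.Println("object checksum (SHA-256):", *out.Checksum.ChecksumSHA256)
	}
	return nil
}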

" } } }, @@ -26135,6 +28315,12 @@ "traits": { "smithy.api#enumValue": "SNOW" } + }, + "EXPRESS_ONEZONE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "EXPRESS_ONEZONE" + } } } }, @@ -26284,7 +28470,7 @@ "DisplayName": { "target": "com.amazonaws.s3#DisplayName", "traits": { - "smithy.api#documentation": "

Container for the display name of the owner. This value is only supported in the\n following Amazon Web Services Regions:

\n
    \n
  • \n

    US East (N. Virginia)

    \n
  • \n
  • \n

    US West (N. California)

    \n
  • \n
  • \n

    US West (Oregon)

    \n
  • \n
  • \n

    Asia Pacific (Singapore)

    \n
  • \n
  • \n

    Asia Pacific (Sydney)

    \n
  • \n
  • \n

    Asia Pacific (Tokyo)

    \n
  • \n
  • \n

    Europe (Ireland)

    \n
  • \n
  • \n

    South America (São Paulo)

    \n
  • \n
" + "smithy.api#documentation": "

Container for the display name of the owner. This value is only supported in the\n following Amazon Web Services Regions:

\n
    \n
  • \n

    US East (N. Virginia)

    \n
  • \n
  • \n

    US West (N. California)

    \n
  • \n
  • \n

    US West (Oregon)

    \n
  • \n
  • \n

    Asia Pacific (Singapore)

    \n
  • \n
  • \n

    Asia Pacific (Sydney)

    \n
  • \n
  • \n

    Asia Pacific (Tokyo)

    \n
  • \n
  • \n

    Europe (Ireland)

    \n
  • \n
  • \n

    South America (São Paulo)

    \n
  • \n
\n \n

This functionality is not supported for directory buckets.

\n
" } }, "ID": { @@ -26389,13 +28575,13 @@ "ChecksumCRC32C": { "target": "com.amazonaws.s3#ChecksumCRC32C", "traits": { - "smithy.api#documentation": "

The base64-encoded, 32-bit CRC32C checksum of the object. This will only be present if it was uploaded\n with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

" + "smithy.api#documentation": "

The base64-encoded, 32-bit CRC32C checksum of the object. This will only be present if it was uploaded\n with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

" } }, "ChecksumSHA1": { "target": "com.amazonaws.s3#ChecksumSHA1", "traits": { - "smithy.api#documentation": "

The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded\n with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

" + "smithy.api#documentation": "

The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded\n with the object. When you use the API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

" } }, "ChecksumSHA256": { @@ -26644,11 +28830,16 @@ "aws.protocols#httpChecksum": { "requestAlgorithmMember": "ChecksumAlgorithm" }, - "smithy.api#documentation": "

Sets the accelerate configuration of an existing bucket. Amazon S3 Transfer Acceleration is a\n bucket-level feature that enables you to perform faster data transfers to Amazon S3.

\n

To use this operation, you must have permission to perform the\n s3:PutAccelerateConfiguration action. The bucket owner has this permission\n by default. The bucket owner can grant this permission to others. For more information\n about permissions, see Permissions Related to Bucket Subresource Operations and Managing\n Access Permissions to Your Amazon S3 Resources.

\n

The Transfer Acceleration state of a bucket can be set to one of the following two\n values:

\n
    \n
  • \n

    Enabled – Enables accelerated data transfers to the bucket.

    \n
  • \n
  • \n

    Suspended – Disables accelerated data transfers to the bucket.

    \n
  • \n
\n

The GetBucketAccelerateConfiguration action returns the transfer acceleration state\n of a bucket.

\n

After setting the Transfer Acceleration state of a bucket to Enabled, it might take up\n to thirty minutes before the data transfer rates to the bucket increase.

\n

The name of the bucket used for Transfer Acceleration must be DNS-compliant and must\n not contain periods (\".\").

\n

For more information about transfer acceleration, see Transfer\n Acceleration.

\n

The following operations are related to\n PutBucketAccelerateConfiguration:

\n ", + "smithy.api#documentation": "\n

This operation is not supported by directory buckets.

\n
\n

Sets the accelerate configuration of an existing bucket. Amazon S3 Transfer Acceleration is a\n bucket-level feature that enables you to perform faster data transfers to Amazon S3.

\n

To use this operation, you must have permission to perform the\n s3:PutAccelerateConfiguration action. The bucket owner has this permission\n by default. The bucket owner can grant this permission to others. For more information\n about permissions, see Permissions Related to Bucket Subresource Operations and Managing\n Access Permissions to Your Amazon S3 Resources.

\n

The Transfer Acceleration state of a bucket can be set to one of the following two\n values:

\n
    \n
  • \n

    Enabled – Enables accelerated data transfers to the bucket.

    \n
  • \n
  • \n

    Suspended – Disables accelerated data transfers to the bucket.

    \n
  • \n
\n

The GetBucketAccelerateConfiguration action returns the transfer acceleration state\n of a bucket.

\n

After setting the Transfer Acceleration state of a bucket to Enabled, it might take up\n to thirty minutes before the data transfer rates to the bucket increase.

\n

The name of the bucket used for Transfer Acceleration must be DNS-compliant and must\n not contain periods (\".\").

\n

For more information about transfer acceleration, see Transfer\n Acceleration.

\n

The following operations are related to\n PutBucketAccelerateConfiguration:

\n ", "smithy.api#http": { "method": "PUT", "uri": "/{Bucket}?accelerate", "code": 200 + }, + "smithy.rules#staticContextParams": { + "UseS3ExpressControlEndpoint": { + "value": true + } } } }, @@ -26678,14 +28869,14 @@ "ExpectedBucketOwner": { "target": "com.amazonaws.s3#AccountId", "traits": { - "smithy.api#documentation": "
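A minimal Go sketch of flipping the Transfer Acceleration state described above; the bucket name is a placeholder.

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// enableTransferAcceleration sets the accelerate subresource to Enabled; pass
// types.BucketAccelerateStatusSuspended to turn it back off.
func enableTransferAcceleration(ctx context.Context, client *s3.Client) error {
	_, err := client.PutBucketAccelerateConfiguration(ctx, &s3.PutBucketAccelerateConfigurationInput{
		Bucket: aws.String("amzn-s3-demo-bucket"),
		AccelerateConfiguration: &types.AccelerateConfiguration{
			Status: types.BucketAccelerateStatusEnabled,
		},
	})
	return err
}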

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

", + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } }, "ChecksumAlgorithm": { "target": "com.amazonaws.s3#ChecksumAlgorithm", "traits": { - "smithy.api#documentation": "

Indicates the algorithm used to create the checksum for the object when using the SDK. This header will not provide any\n additional functionality if not using the SDK. When sending this header, there must be a corresponding x-amz-checksum or\n x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For more\n information, see Checking object integrity in\n the Amazon S3 User Guide.

\n

If you provide an individual checksum, Amazon S3 ignores any provided\n ChecksumAlgorithm parameter.

", + "smithy.api#documentation": "

Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any\n additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum or\n x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For more\n information, see Checking object integrity in\n the Amazon S3 User Guide.

\n

If you provide an individual checksum, Amazon S3 ignores any provided\n ChecksumAlgorithm parameter.

", "smithy.api#httpHeader": "x-amz-sdk-checksum-algorithm" } } @@ -26707,7 +28898,7 @@ "requestAlgorithmMember": "ChecksumAlgorithm", "requestChecksumRequired": true }, - "smithy.api#documentation": "

Sets the permissions on an existing bucket using access control lists (ACL). For more\n information, see Using ACLs. To set the ACL of a\n bucket, you must have WRITE_ACP permission.

\n

You can use one of the following two ways to set a bucket's permissions:

\n
    \n
  • \n

    Specify the ACL in the request body

    \n
  • \n
  • \n

    Specify permissions using request headers

    \n
  • \n
\n \n

You cannot specify access permission using both the body and the request\n headers.

\n
\n

Depending on your application needs, you may choose to set the ACL on a bucket using\n either the request body or the headers. For example, if you have an existing application\n that updates a bucket ACL using the request body, then you can continue to use that\n approach.

\n \n

If your bucket uses the bucket owner enforced setting for S3 Object Ownership, ACLs\n are disabled and no longer affect permissions. You must use policies to grant access to\n your bucket and the objects in it. Requests to set ACLs or update ACLs fail and return\n the AccessControlListNotSupported error code. Requests to read ACLs are\n still supported. For more information, see Controlling object\n ownership in the Amazon S3 User Guide.

\n
\n
\n
Permissions
\n
\n

You can set access permissions by using one of the following methods:

\n
    \n
  • \n

    Specify a canned ACL with the x-amz-acl request header. Amazon S3\n supports a set of predefined ACLs, known as canned\n ACLs. Each canned ACL has a predefined set of grantees and\n permissions. Specify the canned ACL name as the value of\n x-amz-acl. If you use this header, you cannot use other\n access control-specific headers in your request. For more information, see\n Canned\n ACL.

    \n
  • \n
  • \n

    Specify access permissions explicitly with the\n x-amz-grant-read, x-amz-grant-read-acp,\n x-amz-grant-write-acp, and\n x-amz-grant-full-control headers. When using these headers,\n you specify explicit access permissions and grantees (Amazon Web Services accounts or Amazon S3\n groups) who will receive the permission. If you use these ACL-specific\n headers, you cannot use the x-amz-acl header to set a canned\n ACL. These parameters map to the set of permissions that Amazon S3 supports in an\n ACL. For more information, see Access Control List (ACL)\n Overview.

    \n

    You specify each grantee as a type=value pair, where the type is one of\n the following:

    \n
      \n
    • \n

      \n id – if the value specified is the canonical user ID\n of an Amazon Web Services account

      \n
    • \n
    • \n

      \n uri – if you are granting permissions to a predefined\n group

      \n
    • \n
    • \n

      \n emailAddress – if the value specified is the email\n address of an Amazon Web Services account

      \n \n

      Using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:

      \n
        \n
      • \n

        US East (N. Virginia)

        \n
      • \n
      • \n

        US West (N. California)

        \n
      • \n
      • \n

        US West (Oregon)

        \n
      • \n
      • \n

        Asia Pacific (Singapore)

        \n
      • \n
      • \n

        Asia Pacific (Sydney)

        \n
      • \n
      • \n

        Asia Pacific (Tokyo)

        \n
      • \n
      • \n

        Europe (Ireland)

        \n
      • \n
      • \n

        South America (São Paulo)

        \n
      • \n
      \n

      For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.

      \n
      \n
    • \n
    \n

    For example, the following x-amz-grant-write header grants\n create, overwrite, and delete objects permission to LogDelivery group\n predefined by Amazon S3 and two Amazon Web Services accounts identified by their email\n addresses.

    \n

    \n x-amz-grant-write:\n uri=\"http://acs.amazonaws.com/groups/s3/LogDelivery\", id=\"111122223333\",\n id=\"555566667777\" \n

    \n
  • \n
\n

You can use either a canned ACL or specify access permissions explicitly. You\n cannot do both.

\n
\n
Grantee Values
\n
\n

You can specify the person (grantee) to whom you're assigning access rights\n (using request elements) in the following ways:

\n
    \n
  • \n

    By the person's ID:

    \n

    \n <>ID<><>GranteesEmail<>\n \n

    \n

    DisplayName is optional and ignored in the request

    \n
  • \n
  • \n

    By URI:

    \n

    \n <>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<>\n

    \n
  • \n
  • \n

    By Email address:

    \n

    \n <>Grantees@email.com<>&\n

    \n

    The grantee is resolved to the CanonicalUser and, in a response to a GET\n Object acl request, appears as the CanonicalUser.

    \n \n

    Using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:

    \n
      \n
    • \n

      US East (N. Virginia)

      \n
    • \n
    • \n

      US West (N. California)

      \n
    • \n
    • \n

      US West (Oregon)

      \n
    • \n
    • \n

      Asia Pacific (Singapore)

      \n
    • \n
    • \n

      Asia Pacific (Sydney)

      \n
    • \n
    • \n

      Asia Pacific (Tokyo)

      \n
    • \n
    • \n

      Europe (Ireland)

      \n
    • \n
    • \n

      South America (São Paulo)

      \n
    • \n
    \n

    For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.

    \n
    \n
  • \n
\n
\n
\n

The following operations are related to PutBucketAcl:

\n ", + "smithy.api#documentation": "\n

This operation is not supported by directory buckets.

\n
\n

Sets the permissions on an existing bucket using access control lists (ACL). For more\n information, see Using ACLs. To set the ACL of a\n bucket, you must have the WRITE_ACP permission.

\n

You can use one of the following two ways to set a bucket's permissions:

\n
    \n
  • \n

    Specify the ACL in the request body

    \n
  • \n
  • \n

    Specify permissions using request headers

    \n
  • \n
\n \n

You cannot specify access permission using both the body and the request\n headers.

\n
\n

Depending on your application needs, you may choose to set the ACL on a bucket using\n either the request body or the headers. For example, if you have an existing application\n that updates a bucket ACL using the request body, then you can continue to use that\n approach.

\n \n

If your bucket uses the bucket owner enforced setting for S3 Object Ownership, ACLs\n are disabled and no longer affect permissions. You must use policies to grant access to\n your bucket and the objects in it. Requests to set ACLs or update ACLs fail and return\n the AccessControlListNotSupported error code. Requests to read ACLs are\n still supported. For more information, see Controlling object\n ownership in the Amazon S3 User Guide.

\n
\n
\n
Permissions
\n
\n

You can set access permissions by using one of the following methods:

\n
    \n
  • \n

    Specify a canned ACL with the x-amz-acl request header. Amazon S3\n supports a set of predefined ACLs, known as canned\n ACLs. Each canned ACL has a predefined set of grantees and\n permissions. Specify the canned ACL name as the value of\n x-amz-acl. If you use this header, you cannot use other\n access control-specific headers in your request. For more information, see\n Canned\n ACL.

    \n
  • \n
  • \n

    Specify access permissions explicitly with the\n x-amz-grant-read, x-amz-grant-read-acp,\n x-amz-grant-write-acp, and\n x-amz-grant-full-control headers. When using these headers,\n you specify explicit access permissions and grantees (Amazon Web Services accounts or Amazon S3\n groups) who will receive the permission. If you use these ACL-specific\n headers, you cannot use the x-amz-acl header to set a canned\n ACL. These parameters map to the set of permissions that Amazon S3 supports in an\n ACL. For more information, see Access Control List (ACL)\n Overview.

    \n

    You specify each grantee as a type=value pair, where the type is one of\n the following:

    \n
      \n
    • \n

      \n id – if the value specified is the canonical user ID\n of an Amazon Web Services account

      \n
    • \n
    • \n

      \n uri – if you are granting permissions to a predefined\n group

      \n
    • \n
    • \n

      \n emailAddress – if the value specified is the email\n address of an Amazon Web Services account

      \n \n

      Using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:

      \n
        \n
      • \n

        US East (N. Virginia)

        \n
      • \n
      • \n

        US West (N. California)

        \n
      • \n
      • \n

        US West (Oregon)

        \n
      • \n
      • \n

        Asia Pacific (Singapore)

        \n
      • \n
      • \n

        Asia Pacific (Sydney)

        \n
      • \n
      • \n

        Asia Pacific (Tokyo)

        \n
      • \n
      • \n

        Europe (Ireland)

        \n
      • \n
      • \n

        South America (São Paulo)

        \n
      • \n
      \n

      For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.

      \n
      \n
    • \n
    \n

    For example, the following x-amz-grant-write header grants\n create, overwrite, and delete objects permission to LogDelivery group\n predefined by Amazon S3 and two Amazon Web Services accounts identified by their email\n addresses.

    \n

    \n x-amz-grant-write:\n uri=\"http://acs.amazonaws.com/groups/s3/LogDelivery\", id=\"111122223333\",\n id=\"555566667777\" \n

    \n
  • \n
\n

You can use either a canned ACL or specify access permissions explicitly. You\n cannot do both.

\n
\n
Grantee Values
\n
\n

You can specify the person (grantee) to whom you're assigning access rights\n (using request elements) in the following ways:

\n
    \n
  • \n

    By the person's ID:

    \n

    \n <>ID<><>GranteesEmail<>\n \n

    \n

    DisplayName is optional and ignored in the request

    \n
  • \n
  • \n

    By URI:

    \n

    \n <>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<>\n

    \n
  • \n
  • \n

    By Email address:

    \n

    \n <>Grantees@email.com<>&\n

    \n

    The grantee is resolved to the CanonicalUser and, in a response to a GET\n Object acl request, appears as the CanonicalUser.

    \n \n

    Using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:

    \n
      \n
    • \n

      US East (N. Virginia)

      \n
    • \n
    • \n

      US West (N. California)

      \n
    • \n
    • \n

      US West (Oregon)

      \n
    • \n
    • \n

      Asia Pacific (Singapore)

      \n
    • \n
    • \n

      Asia Pacific (Sydney)

      \n
    • \n
    • \n

      Asia Pacific (Tokyo)

      \n
    • \n
    • \n

      Europe (Ireland)

      \n
    • \n
    • \n

      South America (São Paulo)

      \n
    • \n
    \n

    For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.

    \n
    \n
  • \n
\n
\n
\n

The following operations are related to PutBucketAcl:

\n ", "smithy.api#examples": [ { "title": "Put bucket acl", @@ -26723,6 +28914,11 @@ "method": "PUT", "uri": "/{Bucket}?acl", "code": 200 + }, + "smithy.rules#staticContextParams": { + "UseS3ExpressControlEndpoint": { + "value": true + } } } }, @@ -26765,7 +28961,7 @@ "ChecksumAlgorithm": { "target": "com.amazonaws.s3#ChecksumAlgorithm", "traits": { - "smithy.api#documentation": "
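A short Go sketch of the canned-ACL form described above; canned ACLs and the explicit x-amz-grant-* headers are mutually exclusive, so only one style is set here. The bucket name is a placeholder.

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// putCannedBucketACL applies a predefined ACL. To grant permissions explicitly
// instead, leave ACL unset and populate GrantRead, GrantWriteACP, and so on with
// type=value pairs such as id="111122223333" or a predefined group URI.
func putCannedBucketACL(ctx context.Context, client *s3.Client) error {
	_, err := client.PutBucketAcl(ctx, &s3.PutBucketAclInput{
		Bucket: aws.String("amzn-s3-demo-bucket"),
		ACL:    types.BucketCannedACLPrivate,
	})
	return err
}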

Indicates the algorithm used to create the checksum for the object when using the SDK. This header will not provide any\n additional functionality if not using the SDK. When sending this header, there must be a corresponding x-amz-checksum or\n x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For more\n information, see Checking object integrity in\n the Amazon S3 User Guide.

\n

If you provide an individual checksum, Amazon S3 ignores any provided\n ChecksumAlgorithm parameter.

", + "smithy.api#documentation": "

Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any\n additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum or\n x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For more\n information, see Checking object integrity in\n the Amazon S3 User Guide.

\n

If you provide an individual checksum, Amazon S3 ignores any provided\n ChecksumAlgorithm parameter.

", "smithy.api#httpHeader": "x-amz-sdk-checksum-algorithm" } }, @@ -26807,7 +29003,7 @@ "ExpectedBucketOwner": { "target": "com.amazonaws.s3#AccountId", "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

", + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } } @@ -26825,11 +29021,16 @@ "target": "smithy.api#Unit" }, "traits": { - "smithy.api#documentation": "

Sets an analytics configuration for the bucket (specified by the analytics configuration\n ID). You can have up to 1,000 analytics configurations per bucket.

\n

You can choose to have storage class analysis export analysis reports sent to a\n comma-separated values (CSV) flat file. See the DataExport request element.\n Reports are updated daily and are based on the object filters that you configure. When\n selecting data export, you specify a destination bucket and an optional destination prefix\n where the file is written. You can export the data to a destination bucket in a different\n account. However, the destination bucket must be in the same Region as the bucket that you\n are making the PUT analytics configuration to. For more information, see Amazon S3\n Analytics – Storage Class Analysis.

\n \n

You must create a bucket policy on the destination bucket where the exported file is\n written to grant permissions to Amazon S3 to write objects to the bucket. For an example\n policy, see Granting Permissions for Amazon S3 Inventory and Storage Class Analysis.

\n
\n

To use this operation, you must have permissions to perform the\n s3:PutAnalyticsConfiguration action. The bucket owner has this permission\n by default. The bucket owner can grant this permission to others. For more information\n about permissions, see Permissions Related to Bucket Subresource Operations and Managing\n Access Permissions to Your Amazon S3 Resources.

\n

\n PutBucketAnalyticsConfiguration has the following special errors:

\n
    \n
  • \n
      \n
    • \n

      \n HTTP Error: HTTP 400 Bad Request\n

      \n
    • \n
    • \n

      \n Code: InvalidArgument\n

      \n
    • \n
    • \n

      \n Cause: Invalid argument.\n

      \n
    • \n
    \n
  • \n
  • \n
      \n
    • \n

      \n HTTP Error: HTTP 400 Bad Request\n

      \n
    • \n
    • \n

      \n Code: TooManyConfigurations\n

      \n
    • \n
    • \n

      \n Cause: You are attempting to create a new configuration but have\n already reached the 1,000-configuration limit.\n

      \n
    • \n
    \n
  • \n
  • \n
      \n
    • \n

      \n HTTP Error: HTTP 403 Forbidden\n

      \n
    • \n
    • \n

      \n Code: AccessDenied\n

      \n
    • \n
    • \n

      \n Cause: You are not the owner of the specified bucket, or you do\n not have the s3:PutAnalyticsConfiguration bucket permission to set the\n configuration on the bucket.\n

      \n
    • \n
    \n
  • \n
\n

The following operations are related to\n PutBucketAnalyticsConfiguration:

\n ", + "smithy.api#documentation": "\n

This operation is not supported by directory buckets.

\n
\n

Sets an analytics configuration for the bucket (specified by the analytics configuration\n ID). You can have up to 1,000 analytics configurations per bucket.

\n

You can choose to have storage class analysis export analysis reports sent to a\n comma-separated values (CSV) flat file. See the DataExport request element.\n Reports are updated daily and are based on the object filters that you configure. When\n selecting data export, you specify a destination bucket and an optional destination prefix\n where the file is written. You can export the data to a destination bucket in a different\n account. However, the destination bucket must be in the same Region as the bucket that you\n are making the PUT analytics configuration to. For more information, see Amazon S3\n Analytics – Storage Class Analysis.

\n \n

You must create a bucket policy on the destination bucket where the exported file is\n written to grant permissions to Amazon S3 to write objects to the bucket. For an example\n policy, see Granting Permissions for Amazon S3 Inventory and Storage Class Analysis.

\n
\n

To use this operation, you must have permissions to perform the\n s3:PutAnalyticsConfiguration action. The bucket owner has this permission\n by default. The bucket owner can grant this permission to others. For more information\n about permissions, see Permissions Related to Bucket Subresource Operations and Managing\n Access Permissions to Your Amazon S3 Resources.

\n

\n PutBucketAnalyticsConfiguration has the following special errors:

\n
    \n
  • \n
      \n
    • \n

      \n HTTP Error: HTTP 400 Bad Request\n

      \n
    • \n
    • \n

      \n Code: InvalidArgument\n

      \n
    • \n
    • \n

      \n Cause: Invalid argument.\n

      \n
    • \n
    \n
  • \n
  • \n
      \n
    • \n

      \n HTTP Error: HTTP 400 Bad Request\n

      \n
    • \n
    • \n

      \n Code: TooManyConfigurations\n

      \n
    • \n
    • \n

      \n Cause: You are attempting to create a new configuration but have\n already reached the 1,000-configuration limit.\n

      \n
    • \n
    \n
  • \n
  • \n
      \n
    • \n

      \n HTTP Error: HTTP 403 Forbidden\n

      \n
    • \n
    • \n

      \n Code: AccessDenied\n

      \n
    • \n
    • \n

      \n Cause: You are not the owner of the specified bucket, or you do\n not have the s3:PutAnalyticsConfiguration bucket permission to set the\n configuration on the bucket.\n

      \n
    • \n
    \n
  • \n
\n

The following operations are related to\n PutBucketAnalyticsConfiguration:

\n ", "smithy.api#http": { "method": "PUT", "uri": "/{Bucket}?analytics", "code": 200 + }, + "smithy.rules#staticContextParams": { + "UseS3ExpressControlEndpoint": { + "value": true + } } } }, @@ -26867,7 +29068,7 @@ "ExpectedBucketOwner": { "target": "com.amazonaws.s3#AccountId", "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

", + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } } @@ -26889,7 +29090,7 @@ "requestAlgorithmMember": "ChecksumAlgorithm", "requestChecksumRequired": true }, - "smithy.api#documentation": "

Sets the cors configuration for your bucket. If the configuration exists,\n Amazon S3 replaces it.

\n

To use this operation, you must be allowed to perform the s3:PutBucketCORS\n action. By default, the bucket owner has this permission and can grant it to others.

\n

You set this configuration on a bucket so that the bucket can service cross-origin\n requests. For example, you might want to enable a request whose origin is\n http://www.example.com to access your Amazon S3 bucket at\n my.example.bucket.com by using the browser's XMLHttpRequest\n capability.

\n

To enable cross-origin resource sharing (CORS) on a bucket, you add the\n cors subresource to the bucket. The cors subresource is an XML\n document in which you configure rules that identify origins and the HTTP methods that can\n be executed on your bucket. The document is limited to 64 KB in size.

\n

When Amazon S3 receives a cross-origin request (or a pre-flight OPTIONS request) against a\n bucket, it evaluates the cors configuration on the bucket and uses the first\n CORSRule rule that matches the incoming browser request to enable a\n cross-origin request. For a rule to match, the following conditions must be met:

\n
    \n
  • \n

    The request's Origin header must match AllowedOrigin\n elements.

    \n
  • \n
  • \n

    The request method (for example, GET, PUT, HEAD, and so on) or the\n Access-Control-Request-Method header in case of a pre-flight\n OPTIONS request must be one of the AllowedMethod\n elements.

    \n
  • \n
  • \n

    Every header specified in the Access-Control-Request-Headers request\n header of a pre-flight request must match an AllowedHeader element.\n

    \n
  • \n
\n

For more information about CORS, go to Enabling Cross-Origin Resource Sharing in\n the Amazon S3 User Guide.

\n

The following operations are related to PutBucketCors:

\n ", + "smithy.api#documentation": "\n

This operation is not supported by directory buckets.

\n
\n

Sets the cors configuration for your bucket. If the configuration exists,\n Amazon S3 replaces it.

\n

To use this operation, you must be allowed to perform the s3:PutBucketCORS\n action. By default, the bucket owner has this permission and can grant it to others.

\n

You set this configuration on a bucket so that the bucket can service cross-origin\n requests. For example, you might want to enable a request whose origin is\n http://www.example.com to access your Amazon S3 bucket at\n my.example.bucket.com by using the browser's XMLHttpRequest\n capability.

\n

To enable cross-origin resource sharing (CORS) on a bucket, you add the\n cors subresource to the bucket. The cors subresource is an XML\n document in which you configure rules that identify origins and the HTTP methods that can\n be executed on your bucket. The document is limited to 64 KB in size.

\n

When Amazon S3 receives a cross-origin request (or a pre-flight OPTIONS request) against a\n bucket, it evaluates the cors configuration on the bucket and uses the first\n CORSRule rule that matches the incoming browser request to enable a\n cross-origin request. For a rule to match, the following conditions must be met:

\n
    \n
  • \n

    The request's Origin header must match AllowedOrigin\n elements.

    \n
  • \n
  • \n

    The request method (for example, GET, PUT, HEAD, and so on) or the\n Access-Control-Request-Method header in case of a pre-flight\n OPTIONS request must be one of the AllowedMethod\n elements.

    \n
  • \n
  • \n

    Every header specified in the Access-Control-Request-Headers request\n header of a pre-flight request must match an AllowedHeader element.\n

    \n
  • \n
\n

For more information about CORS, go to Enabling Cross-Origin Resource Sharing in\n the Amazon S3 User Guide.

\n

The following operations are related to PutBucketCors:

\n ", "smithy.api#examples": [ { "title": "To set cors configuration on a bucket.", @@ -26937,6 +29138,11 @@ "method": "PUT", "uri": "/{Bucket}?cors", "code": 200 + }, + "smithy.rules#staticContextParams": { + "UseS3ExpressControlEndpoint": { + "value": true + } } } }, @@ -26973,14 +29179,14 @@ "ChecksumAlgorithm": { "target": "com.amazonaws.s3#ChecksumAlgorithm", "traits": { - "smithy.api#documentation": "
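The rule-matching behavior above maps directly onto the CORSRule members in the generated Go client. A minimal sketch that allows the example origin to issue GET and PUT requests; the bucket name is a placeholder.

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// putCORS installs a single rule; AllowedOrigins is matched against the request's
// Origin header and AllowedMethods against the method (or the pre-flight
// Access-Control-Request-Method header), as described above.
func putCORS(ctx context.Context, client *s3.Client) error {
	_, err := client.PutBucketCors(ctx, &s3.PutBucketCorsInput{
		Bucket: aws.String("amzn-s3-demo-bucket"),
		CORSConfiguration: &types.CORSConfiguration{
			CORSRules: []types.CORSRule{{
				AllowedOrigins: []string{"http://www.example.com"},
				AllowedMethods: []string{"GET", "PUT"},
				AllowedHeaders: []string{"*"},
			}},
		},
	})
	return err
}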

Indicates the algorithm used to create the checksum for the object when using the SDK. This header will not provide any\n additional functionality if not using the SDK. When sending this header, there must be a corresponding x-amz-checksum or\n x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For more\n information, see Checking object integrity in\n the Amazon S3 User Guide.

\n

If you provide an individual checksum, Amazon S3 ignores any provided\n ChecksumAlgorithm parameter.

", + "smithy.api#documentation": "

Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any\n additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum or\n x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For more\n information, see Checking object integrity in\n the Amazon S3 User Guide.

\n

If you provide an individual checksum, Amazon S3 ignores any provided\n ChecksumAlgorithm parameter.

", "smithy.api#httpHeader": "x-amz-sdk-checksum-algorithm" } }, "ExpectedBucketOwner": { "target": "com.amazonaws.s3#AccountId", "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

", + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } } @@ -27002,11 +29208,16 @@ "requestAlgorithmMember": "ChecksumAlgorithm", "requestChecksumRequired": true }, - "smithy.api#documentation": "

This action uses the encryption subresource to configure default encryption\n and Amazon S3 Bucket Keys for an existing bucket.

\n

By default, all buckets have a default encryption configuration that uses server-side\n encryption with Amazon S3 managed keys (SSE-S3). You can optionally configure default encryption\n for a bucket by using server-side encryption with Key Management Service (KMS) keys (SSE-KMS) or\n dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS). If you specify default encryption by using\n SSE-KMS, you can also configure Amazon S3 Bucket\n Keys. If you use PutBucketEncryption to set your default bucket encryption to SSE-KMS, you should verify that your KMS key ID is correct. Amazon S3 does not validate the KMS key ID provided in PutBucketEncryption requests.

\n \n

This action requires Amazon Web Services Signature Version 4. For more information, see \n Authenticating Requests (Amazon Web Services Signature Version 4).

\n
\n

To use this operation, you must have permission to perform the\n s3:PutEncryptionConfiguration action. The bucket owner has this permission\n by default. The bucket owner can grant this permission to others. For more information\n about permissions, see Permissions Related to Bucket Subresource Operations and Managing\n Access Permissions to Your Amazon S3 Resources in the\n Amazon S3 User Guide.

\n

The following operations are related to PutBucketEncryption:

\n ", + "smithy.api#documentation": "\n

This operation is not supported by directory buckets.

\n
\n

This action uses the encryption subresource to configure default encryption\n and Amazon S3 Bucket Keys for an existing bucket.

\n

By default, all buckets have a default encryption configuration that uses server-side\n encryption with Amazon S3 managed keys (SSE-S3). You can optionally configure default encryption\n for a bucket by using server-side encryption with Key Management Service (KMS) keys (SSE-KMS) or\n dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS). If you specify default encryption by using\n SSE-KMS, you can also configure Amazon S3 Bucket\n Keys. If you use PutBucketEncryption to set your default bucket encryption to SSE-KMS, you should verify that your KMS key ID is correct. Amazon S3 does not validate the KMS key ID provided in PutBucketEncryption requests.

\n \n

This action requires Amazon Web Services Signature Version 4. For more information, see \n Authenticating Requests (Amazon Web Services Signature Version 4).

\n
\n

To use this operation, you must have permission to perform the\n s3:PutEncryptionConfiguration action. The bucket owner has this permission\n by default. The bucket owner can grant this permission to others. For more information\n about permissions, see Permissions Related to Bucket Subresource Operations and Managing\n Access Permissions to Your Amazon S3 Resources in the\n Amazon S3 User Guide.

\n

The following operations are related to PutBucketEncryption:

\n ", "smithy.api#http": { "method": "PUT", "uri": "/{Bucket}?encryption", "code": 200 + }, + "smithy.rules#staticContextParams": { + "UseS3ExpressControlEndpoint": { + "value": true + } } } }, @@ -27034,7 +29245,7 @@ "ChecksumAlgorithm": { "target": "com.amazonaws.s3#ChecksumAlgorithm", "traits": { - "smithy.api#documentation": "
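A Go sketch of setting SSE-KMS default encryption as described above. Note the documentation's caveat that Amazon S3 does not validate the KMS key ID, so the caller must verify the key ARN (a placeholder is passed in here) before applying it; the bucket name is also a placeholder.

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// putDefaultKMSEncryption makes SSE-KMS with the given key the bucket default.
// Amazon S3 accepts the request even if the key ID is wrong, so validate it first.
func putDefaultKMSEncryption(ctx context.Context, client *s3.Client, kmsKeyARN string) error {
	_, err := client.PutBucketEncryption(ctx, &s3.PutBucketEncryptionInput{
		Bucket: aws.String("amzn-s3-demo-bucket"),
		ServerSideEncryptionConfiguration: &types.ServerSideEncryptionConfiguration{
			Rules: []types.ServerSideEncryptionRule{{
				ApplyServerSideEncryptionByDefault: &types.ServerSideEncryptionByDefault{
					SSEAlgorithm:   types.ServerSideEncryptionAwsKms,
					KMSMasterKeyID: aws.String(kmsKeyARN),
				},
			}},
		},
	})
	return err
}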

Indicates the algorithm used to create the checksum for the object when using the SDK. This header will not provide any\n additional functionality if not using the SDK. When sending this header, there must be a corresponding x-amz-checksum or\n x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For more\n information, see Checking object integrity in\n the Amazon S3 User Guide.

\n

If you provide an individual checksum, Amazon S3 ignores any provided\n ChecksumAlgorithm parameter.

", + "smithy.api#documentation": "

Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any\n additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum or\n x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For more\n information, see Checking object integrity in\n the Amazon S3 User Guide.

\n

If you provide an individual checksum, Amazon S3 ignores any provided\n ChecksumAlgorithm parameter.

", "smithy.api#httpHeader": "x-amz-sdk-checksum-algorithm" } }, @@ -27049,7 +29260,7 @@ "ExpectedBucketOwner": { "target": "com.amazonaws.s3#AccountId", "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

", + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } } @@ -27067,11 +29278,16 @@ "target": "smithy.api#Unit" }, "traits": { - "smithy.api#documentation": "

Puts a S3 Intelligent-Tiering configuration to the specified bucket. You can have up to\n 1,000 S3 Intelligent-Tiering configurations per bucket.

\n

The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without performance impact or operational overhead. S3 Intelligent-Tiering delivers automatic cost savings in three low latency and high throughput access tiers. To get the lowest storage cost on data that can be accessed in minutes to hours, you can choose to activate additional archiving capabilities.

\n

The S3 Intelligent-Tiering storage class is the ideal storage class for data with unknown, changing, or unpredictable access patterns, independent of object size or retention period. If the size of an object is less than 128 KB, it is not monitored and not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the Frequent Access tier rates in the S3 Intelligent-Tiering storage class.

\n

For more information, see Storage class for automatically optimizing frequently and infrequently accessed objects.

\n

Operations related to PutBucketIntelligentTieringConfiguration include:

\n \n \n

You only need S3 Intelligent-Tiering enabled on a bucket if you want to automatically\n move objects stored in the S3 Intelligent-Tiering storage class to the Archive Access\n or Deep Archive Access tier.

\n
\n

\n PutBucketIntelligentTieringConfiguration has the following special\n errors:

\n
\n
HTTP 400 Bad Request Error
\n
\n

\n Code: InvalidArgument

\n

\n Cause: Invalid Argument

\n
\n
HTTP 400 Bad Request Error
\n
\n

\n Code: TooManyConfigurations

\n

\n Cause: You are attempting to create a new configuration\n but have already reached the 1,000-configuration limit.

\n
\n
HTTP 403 Forbidden Error
\n
\n

\n Cause: You are not the owner of the specified bucket, or\n you do not have the s3:PutIntelligentTieringConfiguration bucket\n permission to set the configuration on the bucket.

\n
\n
", + "smithy.api#documentation": "\n

This operation is not supported by directory buckets.

\n
\n

Puts an S3 Intelligent-Tiering configuration to the specified bucket. You can have up to\n 1,000 S3 Intelligent-Tiering configurations per bucket.

\n

The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without performance impact or operational overhead. S3 Intelligent-Tiering delivers automatic cost savings in three low latency and high throughput access tiers. To get the lowest storage cost on data that can be accessed in minutes to hours, you can choose to activate additional archiving capabilities.

\n

The S3 Intelligent-Tiering storage class is the ideal storage class for data with unknown, changing, or unpredictable access patterns, independent of object size or retention period. If the size of an object is less than 128 KB, it is not monitored and not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the Frequent Access tier rates in the S3 Intelligent-Tiering storage class.

\n

For more information, see Storage class for automatically optimizing frequently and infrequently accessed objects.

\n

Operations related to PutBucketIntelligentTieringConfiguration include:

\n \n \n

You only need S3 Intelligent-Tiering enabled on a bucket if you want to automatically\n move objects stored in the S3 Intelligent-Tiering storage class to the Archive Access\n or Deep Archive Access tier.

\n
\n

\n PutBucketIntelligentTieringConfiguration has the following special\n errors:

  • HTTP 400 Bad Request Error
    • Code: InvalidArgument
    • Cause: Invalid Argument
  • HTTP 400 Bad Request Error
    • Code: TooManyConfigurations
    • Cause: You are attempting to create a new configuration\n but have already reached the 1,000-configuration limit.
  • HTTP 403 Forbidden Error
    • Cause: You are not the owner of the specified bucket, or\n you do not have the s3:PutIntelligentTieringConfiguration bucket\n permission to set the configuration on the bucket.
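The model text above describes the archive tiers only at a high level, so here is a minimal aws-sdk-go-v2 sketch (not part of the model) that sends a configuration moving objects untouched for 90 days into the Archive Access tier. The bucket name, configuration ID, and day threshold are placeholders, and numeric member shapes (value vs. pointer) differ slightly between SDK releases.

// Illustrative only: a minimal aws-sdk-go-v2 call to
// PutBucketIntelligentTieringConfiguration. Bucket name, configuration ID,
// and the 90-day threshold are placeholders.
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx) // region and credentials from the default chain
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Move objects that have not been accessed for 90 days to the Archive Access tier.
	_, err = client.PutBucketIntelligentTieringConfiguration(ctx, &s3.PutBucketIntelligentTieringConfigurationInput{
		Bucket: aws.String("DOC-EXAMPLE-BUCKET"),
		Id:     aws.String("ArchiveAfter90Days"),
		IntelligentTieringConfiguration: &types.IntelligentTieringConfiguration{
			Id:     aws.String("ArchiveAfter90Days"),
			Status: types.IntelligentTieringStatusEnabled,
			Tierings: []types.Tiering{{
				AccessTier: types.IntelligentTieringAccessTierArchiveAccess,
				Days:       aws.Int32(90),
			}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}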
", "smithy.api#http": { "method": "PUT", "uri": "/{Bucket}?intelligent-tiering", "code": 200 + }, + "smithy.rules#staticContextParams": { + "UseS3ExpressControlEndpoint": { + "value": true + } } } }, @@ -27120,11 +29336,16 @@ "target": "smithy.api#Unit" }, "traits": { - "smithy.api#documentation": "

This implementation of the PUT action adds an inventory configuration\n (identified by the inventory ID) to the bucket. You can have up to 1,000 inventory\n configurations per bucket.

\n

Amazon S3 inventory generates inventories of the objects in the bucket on a daily or weekly\n basis, and the results are published to a flat file. The bucket that is inventoried is\n called the source bucket, and the bucket where the inventory flat file\n is stored is called the destination bucket. The\n destination bucket must be in the same Amazon Web Services Region as the\n source bucket.

\n

When you configure an inventory for a source bucket, you specify\n the destination bucket where you want the inventory to be stored, and\n whether to generate the inventory daily or weekly. You can also configure what object\n metadata to include and whether to inventory all object versions or only current versions.\n For more information, see Amazon S3 Inventory in the\n Amazon S3 User Guide.

\n \n

You must create a bucket policy on the destination bucket to\n grant permissions to Amazon S3 to write objects to the bucket in the defined location. For an\n example policy, see Granting Permissions for Amazon S3 Inventory and Storage Class Analysis.

\n
\n
\n
Permissions
\n
\n

To use this operation, you must have permission to perform the\n s3:PutInventoryConfiguration action. The bucket owner has this\n permission by default and can grant this permission to others.

\n

The s3:PutInventoryConfiguration permission allows a user to\n create an S3 Inventory\n report that includes all object metadata fields available and to specify the\n destination bucket to store the inventory. A user with read access to objects in\n the destination bucket can also access all object metadata fields that are\n available in the inventory report.

\n

To restrict access to an inventory report, see Restricting access to an Amazon S3 Inventory report in the\n Amazon S3 User Guide. For more information about the metadata\n fields available in S3 Inventory, see Amazon S3 Inventory lists in the Amazon S3 User Guide. For\n more information about permissions, see Permissions related to bucket subresource operations and Identity and access management in Amazon S3 in the\n Amazon S3 User Guide.

\n
\n
\n

\n PutBucketInventoryConfiguration has the following special errors:

  • HTTP 400 Bad Request Error
    • Code: InvalidArgument
    • Cause: Invalid Argument
  • HTTP 400 Bad Request Error
    • Code: TooManyConfigurations
    • Cause: You are attempting to create a new configuration\n but have already reached the 1,000-configuration limit.
  • HTTP 403 Forbidden Error
    • Cause: You are not the owner of the specified bucket, or\n you do not have the s3:PutInventoryConfiguration bucket permission to\n set the configuration on the bucket.

The following operations are related to\n PutBucketInventoryConfiguration:

\n ", + "smithy.api#documentation": "\n

This operation is not supported by directory buckets.

\n
\n

This implementation of the PUT action adds an inventory configuration\n (identified by the inventory ID) to the bucket. You can have up to 1,000 inventory\n configurations per bucket.

\n

Amazon S3 inventory generates inventories of the objects in the bucket on a daily or weekly\n basis, and the results are published to a flat file. The bucket that is inventoried is\n called the source bucket, and the bucket where the inventory flat file\n is stored is called the destination bucket. The\n destination bucket must be in the same Amazon Web Services Region as the\n source bucket.

\n

When you configure an inventory for a source bucket, you specify\n the destination bucket where you want the inventory to be stored, and\n whether to generate the inventory daily or weekly. You can also configure what object\n metadata to include and whether to inventory all object versions or only current versions.\n For more information, see Amazon S3 Inventory in the\n Amazon S3 User Guide.

\n \n

You must create a bucket policy on the destination bucket to\n grant permissions to Amazon S3 to write objects to the bucket in the defined location. For an\n example policy, see Granting Permissions for Amazon S3 Inventory and Storage Class Analysis.
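As a hedged illustration of the destination-bucket policy requirement described above, the sketch below (not part of the model) uses PutBucketPolicy to let the Amazon S3 service principal write inventory objects into the destination bucket. The bucket names, account ID, and exact statement shape are placeholders; the example policy linked in the user guide remains the authoritative form. It assumes the same imports and *s3.Client construction as the earlier PutBucketIntelligentTieringConfiguration sketch.

// Illustrative only: allow Amazon S3 (s3.amazonaws.com) to deliver inventory
// reports into the destination bucket. Names and the account ID are placeholders.
func allowInventoryDelivery(ctx context.Context, client *s3.Client) error {
	policy := `{
	  "Version": "2012-10-17",
	  "Statement": [{
	    "Sid": "InventoryDelivery",
	    "Effect": "Allow",
	    "Principal": {"Service": "s3.amazonaws.com"},
	    "Action": "s3:PutObject",
	    "Resource": "arn:aws:s3:::DOC-EXAMPLE-DESTINATION/*",
	    "Condition": {"StringEquals": {"aws:SourceAccount": "111122223333"}}
	  }]
	}`
	_, err := client.PutBucketPolicy(ctx, &s3.PutBucketPolicyInput{
		Bucket: aws.String("DOC-EXAMPLE-DESTINATION"),
		Policy: aws.String(policy),
	})
	return err
}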

\n
\n
\n
Permissions
\n
\n

To use this operation, you must have permission to perform the\n s3:PutInventoryConfiguration action. The bucket owner has this\n permission by default and can grant this permission to others.

\n

The s3:PutInventoryConfiguration permission allows a user to\n create an S3 Inventory\n report that includes all object metadata fields available and to specify the\n destination bucket to store the inventory. A user with read access to objects in\n the destination bucket can also access all object metadata fields that are\n available in the inventory report.

\n

To restrict access to an inventory report, see Restricting access to an Amazon S3 Inventory report in the\n Amazon S3 User Guide. For more information about the metadata\n fields available in S3 Inventory, see Amazon S3 Inventory lists in the Amazon S3 User Guide. For\n more information about permissions, see Permissions related to bucket subresource operations and Identity and access management in Amazon S3 in the\n Amazon S3 User Guide.

\n
\n
\n

\n PutBucketInventoryConfiguration has the following special errors:

  • HTTP 400 Bad Request Error
    • Code: InvalidArgument
    • Cause: Invalid Argument
  • HTTP 400 Bad Request Error
    • Code: TooManyConfigurations
    • Cause: You are attempting to create a new configuration\n but have already reached the 1,000-configuration limit.
  • HTTP 403 Forbidden Error
    • Cause: You are not the owner of the specified bucket, or\n you do not have the s3:PutInventoryConfiguration bucket permission to\n set the configuration on the bucket.

The following operations are related to\n PutBucketInventoryConfiguration:

\n ", "smithy.api#http": { "method": "PUT", "uri": "/{Bucket}?inventory", "code": 200 + }, + "smithy.rules#staticContextParams": { + "UseS3ExpressControlEndpoint": { + "value": true + } } } }, @@ -27162,7 +29383,7 @@ "ExpectedBucketOwner": { "target": "com.amazonaws.s3#AccountId", "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

", + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } } @@ -27184,7 +29405,7 @@ "requestAlgorithmMember": "ChecksumAlgorithm", "requestChecksumRequired": true }, - "smithy.api#documentation": "

Creates a new lifecycle configuration for the bucket or replaces an existing lifecycle\n configuration. Keep in mind that this will overwrite an existing lifecycle configuration,\n so if you want to retain any configuration details, they must be included in the new\n lifecycle configuration. For information about lifecycle configuration, see Managing\n your storage lifecycle.

\n \n

Bucket lifecycle configuration now supports specifying a lifecycle rule using an\n object key name prefix, one or more object tags, or a combination of both. Accordingly,\n this section describes the latest API. The previous version of the API supported\n filtering based only on an object key name prefix, which is supported for backward\n compatibility. For the related API description, see PutBucketLifecycle.

\n
\n
\n
Rules
\n
\n

You specify the lifecycle configuration in your request body. The lifecycle\n configuration is specified as XML consisting of one or more rules. An Amazon S3\n Lifecycle configuration can have up to 1,000 rules. This limit is not adjustable.\n Each rule consists of the following:

  • A filter identifying a subset of objects to which the rule applies. The\n filter can be based on a key name prefix, object tags, or a combination of\n both.
  • A status indicating whether the rule is in effect.
  • One or more lifecycle transition and expiration actions that you want\n Amazon S3 to perform on the objects identified by the filter. If the state of\n your bucket is versioning-enabled or versioning-suspended, you can have many\n versions of the same object (one current version and zero or more noncurrent\n versions). Amazon S3 provides predefined actions that you can specify for current\n and noncurrent object versions.

For more information, see Object Lifecycle\n Management and Lifecycle Configuration\n Elements.

\n
\n
Permissions
\n
\n

By default, all Amazon S3 resources are private, including buckets, objects, and\n related subresources (for example, lifecycle configuration and website\n configuration). Only the resource owner (that is, the Amazon Web Services account that created\n it) can access the resource. The resource owner can optionally grant access\n permissions to others by writing an access policy. For this operation, a user must\n get the s3:PutLifecycleConfiguration permission.

\n

You can also explicitly deny permissions. An explicit deny also supersedes any\n other permissions. If you want to block users or accounts from removing or\n deleting objects from your bucket, you must deny them permissions for the\n following actions:

  • s3:DeleteObject
  • s3:DeleteObjectVersion
  • s3:PutLifecycleConfiguration

For more information about permissions, see Managing Access\n Permissions to Your Amazon S3 Resources.

\n
\n
\n

The following operations are related to\n PutBucketLifecycleConfiguration:

\n ", + "smithy.api#documentation": "\n

This operation is not supported by directory buckets.

\n
\n

Creates a new lifecycle configuration for the bucket or replaces an existing lifecycle\n configuration. Keep in mind that this will overwrite an existing lifecycle configuration,\n so if you want to retain any configuration details, they must be included in the new\n lifecycle configuration. For information about lifecycle configuration, see Managing\n your storage lifecycle.

\n \n

Bucket lifecycle configuration now supports specifying a lifecycle rule using an\n object key name prefix, one or more object tags, or a combination of both. Accordingly,\n this section describes the latest API. The previous version of the API supported\n filtering based only on an object key name prefix, which is supported for backward\n compatibility. For the related API description, see PutBucketLifecycle.

\n
\n
\n
Rules
\n
\n

You specify the lifecycle configuration in your request body. The lifecycle\n configuration is specified as XML consisting of one or more rules. An Amazon S3\n Lifecycle configuration can have up to 1,000 rules. This limit is not adjustable.\n Each rule consists of the following:

  • A filter identifying a subset of objects to which the rule applies. The\n filter can be based on a key name prefix, object tags, or a combination of\n both.
  • A status indicating whether the rule is in effect.
  • One or more lifecycle transition and expiration actions that you want\n Amazon S3 to perform on the objects identified by the filter. If the state of\n your bucket is versioning-enabled or versioning-suspended, you can have many\n versions of the same object (one current version and zero or more noncurrent\n versions). Amazon S3 provides predefined actions that you can specify for current\n and noncurrent object versions.

For more information, see Object Lifecycle\n Management and Lifecycle Configuration\n Elements.

\n
\n
Permissions
\n
\n

By default, all Amazon S3 resources are private, including buckets, objects, and\n related subresources (for example, lifecycle configuration and website\n configuration). Only the resource owner (that is, the Amazon Web Services account that created\n it) can access the resource. The resource owner can optionally grant access\n permissions to others by writing an access policy. For this operation, a user must\n get the s3:PutLifecycleConfiguration permission.

\n

You can also explicitly deny permissions. An explicit deny also supersedes any\n other permissions. If you want to block users or accounts from removing or\n deleting objects from your bucket, you must deny them permissions for the\n following actions:

  • s3:DeleteObject
  • s3:DeleteObjectVersion
  • s3:PutLifecycleConfiguration

For more information about permissions, see Managing Access\n Permissions to Your Amazon S3 Resources.

\n
\n
\n

The following operations are related to\n PutBucketLifecycleConfiguration:
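To make the rule structure above concrete, here is a minimal aws-sdk-go-v2 sketch (not part of the model) that installs a single expiration rule using the legacy top-level Prefix member; the bucket name, prefix, and day count are placeholders, and newer rules would express the object subset through a Filter instead. Imports and client construction as in the first sketch.

// Illustrative only: replace the bucket's lifecycle configuration with one rule
// that expires objects under "logs/" after 365 days (legacy Prefix form).
func putLifecycle(ctx context.Context, client *s3.Client) error {
	_, err := client.PutBucketLifecycleConfiguration(ctx, &s3.PutBucketLifecycleConfigurationInput{
		Bucket: aws.String("DOC-EXAMPLE-BUCKET"),
		LifecycleConfiguration: &types.BucketLifecycleConfiguration{
			Rules: []types.LifecycleRule{{
				ID:         aws.String("expire-logs"),
				Prefix:     aws.String("logs/"),
				Status:     types.ExpirationStatusEnabled,
				Expiration: &types.LifecycleExpiration{Days: aws.Int32(365)},
			}},
		},
	})
	return err
}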

\n ", "smithy.api#examples": [ { "title": "Put bucket lifecycle", @@ -27218,6 +29439,11 @@ "method": "PUT", "uri": "/{Bucket}?lifecycle", "code": 200 + }, + "smithy.rules#staticContextParams": { + "UseS3ExpressControlEndpoint": { + "value": true + } } } }, @@ -27238,7 +29464,7 @@ "ChecksumAlgorithm": { "target": "com.amazonaws.s3#ChecksumAlgorithm", "traits": { - "smithy.api#documentation": "

Indicates the algorithm used to create the checksum for the object when using the SDK. This header will not provide any\n additional functionality if not using the SDK. When sending this header, there must be a corresponding x-amz-checksum or\n x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For more\n information, see Checking object integrity in\n the Amazon S3 User Guide.

\n

If you provide an individual checksum, Amazon S3 ignores any provided\n ChecksumAlgorithm parameter.

", + "smithy.api#documentation": "

Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any\n additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum or\n x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For more\n information, see Checking object integrity in\n the Amazon S3 User Guide.

\n

If you provide an individual checksum, Amazon S3 ignores any provided\n ChecksumAlgorithm parameter.

", "smithy.api#httpHeader": "x-amz-sdk-checksum-algorithm" } }, @@ -27253,7 +29479,7 @@ "ExpectedBucketOwner": { "target": "com.amazonaws.s3#AccountId", "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

", + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } } @@ -27275,7 +29501,7 @@ "requestAlgorithmMember": "ChecksumAlgorithm", "requestChecksumRequired": true }, - "smithy.api#documentation": "

Set the logging parameters for a bucket and to specify permissions for who can view and\n modify the logging parameters. All logs are saved to buckets in the same Amazon Web Services Region as\n the source bucket. To set the logging status of a bucket, you must be the bucket\n owner.

\n

The bucket owner is automatically granted FULL_CONTROL to all logs. You use the\n Grantee request element to grant access to other people. The\n Permissions request element specifies the kind of access the grantee has to\n the logs.

\n \n

If the target bucket for log delivery uses the bucket owner enforced setting for S3\n Object Ownership, you can't use the Grantee request element to grant access\n to others. Permissions can only be granted using policies. For more information, see\n Permissions for server access log delivery in the\n Amazon S3 User Guide.

\n
\n
\n
Grantee Values
\n
\n

You can specify the person (grantee) to whom you're assigning access rights (by\n using request elements) in the following ways:

  • By the person's ID:\n <>ID<><>GranteesEmail<>\n DisplayName is optional and ignored in the request.
  • By Email address:\n <>Grantees@email.com<>\n The grantee is resolved to the CanonicalUser and, in a\n response to a GETObjectAcl request, appears as the\n CanonicalUser.
  • By URI:\n <>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<>

To enable logging, you use LoggingEnabled and its children request\n elements. To disable logging, you use an empty BucketLoggingStatus request\n element:

\n

\n \n

\n

For more information about server access logging, see Server Access Logging in the\n Amazon S3 User Guide.

\n

For more information about creating a bucket, see CreateBucket. For more\n information about returning the logging status of a bucket, see GetBucketLogging.

\n

The following operations are related to PutBucketLogging:

\n ", + "smithy.api#documentation": "\n

This operation is not supported by directory buckets.

\n
\n

Set the logging parameters for a bucket and to specify permissions for who can view and\n modify the logging parameters. All logs are saved to buckets in the same Amazon Web Services Region as\n the source bucket. To set the logging status of a bucket, you must be the bucket\n owner.

\n

The bucket owner is automatically granted FULL_CONTROL to all logs. You use the\n Grantee request element to grant access to other people. The\n Permissions request element specifies the kind of access the grantee has to\n the logs.

\n \n

If the target bucket for log delivery uses the bucket owner enforced setting for S3\n Object Ownership, you can't use the Grantee request element to grant access\n to others. Permissions can only be granted using policies. For more information, see\n Permissions for server access log delivery in the\n Amazon S3 User Guide.

\n
\n
\n
Grantee Values
\n
\n

You can specify the person (grantee) to whom you're assigning access rights (by\n using request elements) in the following ways:

  • By the person's ID:\n <>ID<><>GranteesEmail<>\n DisplayName is optional and ignored in the request.
  • By Email address:\n <>Grantees@email.com<>\n The grantee is resolved to the CanonicalUser and, in a\n response to a GETObjectAcl request, appears as the\n CanonicalUser.
  • By URI:\n <>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<>

To enable logging, you use LoggingEnabled and its children request\n elements. To disable logging, you use an empty BucketLoggingStatus request\n element:

\n

\n \n

\n

For more information about server access logging, see Server Access Logging in the\n Amazon S3 User Guide.

\n

For more information about creating a bucket, see CreateBucket. For more\n information about returning the logging status of a bucket, see GetBucketLogging.

\n

The following operations are related to PutBucketLogging:
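A minimal sketch (not part of the model) of the LoggingEnabled request described above, sent with aws-sdk-go-v2; the bucket names and target prefix are placeholders, and it assumes the target bucket already grants log delivery permission as discussed. Imports and client construction as in the first sketch.

// Illustrative only: enable server access logging, delivering logs to a target
// bucket in the same Region under the "access-logs/" prefix.
func enableAccessLogging(ctx context.Context, client *s3.Client) error {
	_, err := client.PutBucketLogging(ctx, &s3.PutBucketLoggingInput{
		Bucket: aws.String("DOC-EXAMPLE-SOURCE"),
		BucketLoggingStatus: &types.BucketLoggingStatus{
			LoggingEnabled: &types.LoggingEnabled{
				TargetBucket: aws.String("DOC-EXAMPLE-LOGS"),
				TargetPrefix: aws.String("access-logs/"),
			},
		},
	})
	return err
}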

\n ", "smithy.api#examples": [ { "title": "Set logging configuration for a bucket", @@ -27304,6 +29530,11 @@ "method": "PUT", "uri": "/{Bucket}?logging", "code": 200 + }, + "smithy.rules#staticContextParams": { + "UseS3ExpressControlEndpoint": { + "value": true + } } } }, @@ -27340,14 +29571,14 @@ "ChecksumAlgorithm": { "target": "com.amazonaws.s3#ChecksumAlgorithm", "traits": { - "smithy.api#documentation": "

Indicates the algorithm used to create the checksum for the object when using the SDK. This header will not provide any\n additional functionality if not using the SDK. When sending this header, there must be a corresponding x-amz-checksum or\n x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For more\n information, see Checking object integrity in\n the Amazon S3 User Guide.

\n

If you provide an individual checksum, Amazon S3 ignores any provided\n ChecksumAlgorithm parameter.

", + "smithy.api#documentation": "

Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any\n additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum or\n x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For more\n information, see Checking object integrity in\n the Amazon S3 User Guide.

\n

If you provide an individual checksum, Amazon S3 ignores any provided\n ChecksumAlgorithm parameter.

", "smithy.api#httpHeader": "x-amz-sdk-checksum-algorithm" } }, "ExpectedBucketOwner": { "target": "com.amazonaws.s3#AccountId", "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

", + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } } @@ -27365,11 +29596,16 @@ "target": "smithy.api#Unit" }, "traits": { - "smithy.api#documentation": "

Sets a metrics configuration (specified by the metrics configuration ID) for the bucket.\n You can have up to 1,000 metrics configurations per bucket. If you're updating an existing\n metrics configuration, note that this is a full replacement of the existing metrics\n configuration. If you don't include the elements you want to keep, they are erased.

\n

To use this operation, you must have permissions to perform the\n s3:PutMetricsConfiguration action. The bucket owner has this permission by\n default. The bucket owner can grant this permission to others. For more information about\n permissions, see Permissions Related to Bucket Subresource Operations and Managing\n Access Permissions to Your Amazon S3 Resources.

\n

For information about CloudWatch request metrics for Amazon S3, see Monitoring\n Metrics with Amazon CloudWatch.

\n

The following operations are related to\n PutBucketMetricsConfiguration:

\n \n

\n PutBucketMetricsConfiguration has the following special error:

  • Error code: TooManyConfigurations
    • Description: You are attempting to create a new configuration but have\n already reached the 1,000-configuration limit.
    • HTTP Status Code: HTTP 400 Bad Request
", + "smithy.api#documentation": "\n

This operation is not supported by directory buckets.

\n
\n

Sets a metrics configuration (specified by the metrics configuration ID) for the bucket.\n You can have up to 1,000 metrics configurations per bucket. If you're updating an existing\n metrics configuration, note that this is a full replacement of the existing metrics\n configuration. If you don't include the elements you want to keep, they are erased.

\n

To use this operation, you must have permissions to perform the\n s3:PutMetricsConfiguration action. The bucket owner has this permission by\n default. The bucket owner can grant this permission to others. For more information about\n permissions, see Permissions Related to Bucket Subresource Operations and Managing\n Access Permissions to Your Amazon S3 Resources.

\n

For information about CloudWatch request metrics for Amazon S3, see Monitoring\n Metrics with Amazon CloudWatch.

\n

The following operations are related to\n PutBucketMetricsConfiguration:

\n \n

\n PutBucketMetricsConfiguration has the following special error:

  • Error code: TooManyConfigurations
    • Description: You are attempting to create a new configuration but have\n already reached the 1,000-configuration limit.
    • HTTP Status Code: HTTP 400 Bad Request
", "smithy.api#http": { "method": "PUT", "uri": "/{Bucket}?metrics", "code": 200 + }, + "smithy.rules#staticContextParams": { + "UseS3ExpressControlEndpoint": { + "value": true + } } } }, @@ -27407,7 +29643,7 @@ "ExpectedBucketOwner": { "target": "com.amazonaws.s3#AccountId", "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

", + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } } @@ -27425,7 +29661,7 @@ "target": "smithy.api#Unit" }, "traits": { - "smithy.api#documentation": "

Enables notifications of specified events for a bucket. For more information about event\n notifications, see Configuring Event\n Notifications.

\n

Using this API, you can replace an existing notification configuration. The\n configuration is an XML file that defines the event types that you want Amazon S3 to publish and\n the destination where you want Amazon S3 to publish an event notification when it detects an\n event of the specified type.

\n

By default, your bucket has no event notifications configured. That is, the notification\n configuration will be an empty NotificationConfiguration.

\n

\n \n

\n

\n \n

\n

This action replaces the existing notification configuration with the configuration you\n include in the request body.

\n

After Amazon S3 receives this request, it first verifies that any Amazon Simple Notification\n Service (Amazon SNS) or Amazon Simple Queue Service (Amazon SQS) destination exists, and\n that the bucket owner has permission to publish to it by sending a test notification. In\n the case of Lambda destinations, Amazon S3 verifies that the Lambda function permissions\n grant Amazon S3 permission to invoke the function from the Amazon S3 bucket. For more information,\n see Configuring Notifications for Amazon S3 Events.

\n

You can disable notifications by adding the empty NotificationConfiguration\n element.

\n

For more information about the number of event notification configurations that you can\n create per bucket, see Amazon S3 service quotas in Amazon Web Services\n General Reference.

\n

By default, only the bucket owner can configure notifications on a bucket. However,\n bucket owners can use a bucket policy to grant permission to other users to set this\n configuration with the required s3:PutBucketNotification permission.

\n \n

The PUT notification is an atomic operation. For example, suppose your notification\n configuration includes SNS topic, SQS queue, and Lambda function configurations. When\n you send a PUT request with this configuration, Amazon S3 sends test messages to your SNS\n topic. If the message fails, the entire PUT action will fail, and Amazon S3 will not add the\n configuration to your bucket.

\n
\n

If the configuration in the request body includes only one\n TopicConfiguration specifying only the\n s3:ReducedRedundancyLostObject event type, the response will also include\n the x-amz-sns-test-message-id header containing the message ID of the test\n notification sent to the topic.

\n

The following action is related to\n PutBucketNotificationConfiguration:

\n ", + "smithy.api#documentation": "\n

This operation is not supported by directory buckets.

\n
\n

Enables notifications of specified events for a bucket. For more information about event\n notifications, see Configuring Event\n Notifications.

\n

Using this API, you can replace an existing notification configuration. The\n configuration is an XML file that defines the event types that you want Amazon S3 to publish and\n the destination where you want Amazon S3 to publish an event notification when it detects an\n event of the specified type.

\n

By default, your bucket has no event notifications configured. That is, the notification\n configuration will be an empty NotificationConfiguration.

\n

\n \n

\n

\n \n

\n

This action replaces the existing notification configuration with the configuration you\n include in the request body.

\n

After Amazon S3 receives this request, it first verifies that any Amazon Simple Notification\n Service (Amazon SNS) or Amazon Simple Queue Service (Amazon SQS) destination exists, and\n that the bucket owner has permission to publish to it by sending a test notification. In\n the case of Lambda destinations, Amazon S3 verifies that the Lambda function permissions\n grant Amazon S3 permission to invoke the function from the Amazon S3 bucket. For more information,\n see Configuring Notifications for Amazon S3 Events.

\n

You can disable notifications by adding the empty NotificationConfiguration\n element.

\n

For more information about the number of event notification configurations that you can\n create per bucket, see Amazon S3 service quotas in Amazon Web Services\n General Reference.

\n

By default, only the bucket owner can configure notifications on a bucket. However,\n bucket owners can use a bucket policy to grant permission to other users to set this\n configuration with the required s3:PutBucketNotification permission.

\n \n

The PUT notification is an atomic operation. For example, suppose your notification\n configuration includes SNS topic, SQS queue, and Lambda function configurations. When\n you send a PUT request with this configuration, Amazon S3 sends test messages to your SNS\n topic. If the message fails, the entire PUT action will fail, and Amazon S3 will not add the\n configuration to your bucket.

\n
\n

If the configuration in the request body includes only one\n TopicConfiguration specifying only the\n s3:ReducedRedundancyLostObject event type, the response will also include\n the x-amz-sns-test-message-id header containing the message ID of the test\n notification sent to the topic.

\n

The following action is related to\n PutBucketNotificationConfiguration:
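As an illustration of replacing the notification configuration described above, here is a minimal aws-sdk-go-v2 sketch (not part of the model); the queue ARN is a placeholder, and Amazon S3 still performs its destination validation test when the request is made. Imports and client construction as in the first sketch.

// Illustrative only: send object-created events to an SQS queue. The queue's
// policy must already allow Amazon S3 to send messages to it.
func configureNotifications(ctx context.Context, client *s3.Client) error {
	_, err := client.PutBucketNotificationConfiguration(ctx, &s3.PutBucketNotificationConfigurationInput{
		Bucket: aws.String("DOC-EXAMPLE-BUCKET"),
		NotificationConfiguration: &types.NotificationConfiguration{
			QueueConfigurations: []types.QueueConfiguration{{
				QueueArn: aws.String("arn:aws:sqs:us-west-2:111122223333:example-queue"),
				Events:   []types.Event{types.Event("s3:ObjectCreated:*")},
			}},
		},
	})
	return err
}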

\n ", "smithy.api#examples": [ { "title": "Set notification configuration for a bucket", @@ -27449,6 +29685,11 @@ "method": "PUT", "uri": "/{Bucket}?notification", "code": 200 + }, + "smithy.rules#staticContextParams": { + "UseS3ExpressControlEndpoint": { + "value": true + } } } }, @@ -27477,7 +29718,7 @@ "ExpectedBucketOwner": { "target": "com.amazonaws.s3#AccountId", "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

", + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } }, @@ -27505,11 +29746,16 @@ "aws.protocols#httpChecksum": { "requestChecksumRequired": true }, - "smithy.api#documentation": "

Creates or modifies OwnershipControls for an Amazon S3 bucket. To use this\n operation, you must have the s3:PutBucketOwnershipControls permission. For\n more information about Amazon S3 permissions, see Specifying permissions in a\n policy.

\n

For information about Amazon S3 Object Ownership, see Using object\n ownership.

\n

The following operations are related to PutBucketOwnershipControls:

\n ", + "smithy.api#documentation": "\n

This operation is not supported by directory buckets.

\n
\n

Creates or modifies OwnershipControls for an Amazon S3 bucket. To use this\n operation, you must have the s3:PutBucketOwnershipControls permission. For\n more information about Amazon S3 permissions, see Specifying permissions in a\n policy.

\n

For information about Amazon S3 Object Ownership, see Using object\n ownership.

\n

The following operations are related to PutBucketOwnershipControls:
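A minimal aws-sdk-go-v2 sketch (not part of the model) of setting OwnershipControls to the bucket owner enforced setting; the bucket name is a placeholder. Imports and client construction as in the first sketch.

// Illustrative only: enforce bucket-owner ownership (which also disables ACLs).
func enforceBucketOwnerOwnership(ctx context.Context, client *s3.Client) error {
	_, err := client.PutBucketOwnershipControls(ctx, &s3.PutBucketOwnershipControlsInput{
		Bucket: aws.String("DOC-EXAMPLE-BUCKET"),
		OwnershipControls: &types.OwnershipControls{
			Rules: []types.OwnershipControlsRule{{
				ObjectOwnership: types.ObjectOwnershipBucketOwnerEnforced,
			}},
		},
	})
	return err
}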

\n ", "smithy.api#http": { "method": "PUT", "uri": "/{Bucket}?ownershipControls", "code": 200 + }, + "smithy.rules#staticContextParams": { + "UseS3ExpressControlEndpoint": { + "value": true + } } } }, @@ -27537,7 +29783,7 @@ "ExpectedBucketOwner": { "target": "com.amazonaws.s3#AccountId", "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

", + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } }, @@ -27568,7 +29814,7 @@ "requestAlgorithmMember": "ChecksumAlgorithm", "requestChecksumRequired": true }, - "smithy.api#documentation": "

Applies an Amazon S3 bucket policy to an Amazon S3 bucket. If you are using an identity other than\n the root user of the Amazon Web Services account that owns the bucket, the calling identity must have the\n PutBucketPolicy permissions on the specified bucket and belong to the\n bucket owner's account in order to use this operation.

\n

If you don't have PutBucketPolicy permissions, Amazon S3 returns a 403\n Access Denied error. If you have the correct permissions, but you're not using an\n identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not\n Allowed error.

\n \n

To ensure that bucket owners don't inadvertently lock themselves out of their own\n buckets, the root principal in a bucket owner's Amazon Web Services account can perform the\n GetBucketPolicy, PutBucketPolicy, and\n DeleteBucketPolicy API actions, even if their bucket policy explicitly\n denies the root principal's access. Bucket owner root principals can only be blocked\n from performing these API actions by VPC endpoint policies and Amazon Web Services Organizations\n policies.

\n
\n

For more information, see Bucket policy\n examples.

\n

The following operations are related to PutBucketPolicy:

\n ", + "smithy.api#documentation": "

Applies an Amazon S3 bucket policy to an Amazon S3 bucket.

\n \n

\n Directory buckets - For directory buckets, you must make requests for this API operation to the Regional endpoint. These endpoints support path-style requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name\n . Virtual-hosted-style requests aren't supported. \nFor more information, see Regional and Zonal endpoints in the\n Amazon S3 User Guide.

\n
\n
\n
Permissions
\n
\n

If you are using an identity other than the\n root user of the Amazon Web Services account that owns the bucket, the calling identity must both have the\n PutBucketPolicy permissions on the specified bucket and belong to the\n bucket owner's account in order to use this operation.

\n

If you don't have PutBucketPolicy permissions, Amazon S3 returns a 403\n Access Denied error. If you have the correct permissions, but you're not using an\n identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not\n Allowed error.

\n \n

To ensure that bucket owners don't inadvertently lock themselves out of their own\n buckets, the root principal in a bucket owner's Amazon Web Services account can perform the\n GetBucketPolicy, PutBucketPolicy, and\n DeleteBucketPolicy API actions, even if their bucket policy explicitly\n denies the root principal's access. Bucket owner root principals can only be blocked\n from performing these API actions by VPC endpoint policies and Amazon Web Services Organizations\n policies.

\n
  • General purpose bucket permissions - The s3:PutBucketPolicy permission is required in a policy. \n For more information about general purpose buckets bucket policies, see Using Bucket Policies and User\n Policies in the Amazon S3 User Guide.
  • Directory bucket permissions - To grant access to this API operation, you must have the s3express:PutBucketPolicy permission in an IAM identity-based policy instead of a bucket policy. Cross-account access to this API operation isn't supported. This operation can only be performed by the Amazon Web Services account that owns the resource. For more information about directory bucket policies and permissions, see Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone in the Amazon S3 User Guide.
\n
Example bucket policies
\n
\n

\n General purpose buckets example bucket policies - See Bucket policy examples in the Amazon S3 User Guide.

\n

\n Directory bucket example bucket policies - See Example bucket policies for S3 Express One Zone in the Amazon S3 User Guide.

\n
\n
HTTP Host header syntax
\n
\n

\n Directory buckets - The HTTP Host header syntax is s3express-control.region.amazonaws.com.

\n
\n
\n

The following operations are related to PutBucketPolicy:
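To illustrate the general purpose bucket case described above, a minimal aws-sdk-go-v2 sketch (not part of the model) that applies a common deny-insecure-transport policy; the bucket name is a placeholder, and for a directory bucket the policy could only grant s3express:CreateSession. Imports and client construction as in the first sketch.

// Illustrative only: deny all non-HTTPS access to the bucket and its objects.
func putBucketPolicy(ctx context.Context, client *s3.Client) error {
	policy := `{
	  "Version": "2012-10-17",
	  "Statement": [{
	    "Sid": "DenyInsecureTransport",
	    "Effect": "Deny",
	    "Principal": "*",
	    "Action": "s3:*",
	    "Resource": ["arn:aws:s3:::DOC-EXAMPLE-BUCKET", "arn:aws:s3:::DOC-EXAMPLE-BUCKET/*"],
	    "Condition": {"Bool": {"aws:SecureTransport": "false"}}
	  }]
	}`
	_, err := client.PutBucketPolicy(ctx, &s3.PutBucketPolicyInput{
		Bucket: aws.String("DOC-EXAMPLE-BUCKET"),
		Policy: aws.String(policy),
	})
	return err
}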

\n ", "smithy.api#examples": [ { "title": "Set bucket policy", @@ -27583,6 +29829,11 @@ "method": "PUT", "uri": "/{Bucket}?policy", "code": 200 + }, + "smithy.rules#staticContextParams": { + "UseS3ExpressControlEndpoint": { + "value": true + } } } }, @@ -27592,7 +29843,7 @@ "Bucket": { "target": "com.amazonaws.s3#BucketName", "traits": { - "smithy.api#documentation": "

The name of the bucket.

", + "smithy.api#documentation": "

The name of the bucket.

\n

\n Directory buckets - When you use this operation with a directory bucket, you must use path-style requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name\n . Virtual-hosted-style requests aren't supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must also follow the format \n bucket_base_name--az_id--x-s3 (for example, \n DOC-EXAMPLE-BUCKET--usw2-az2--x-s3). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide\n

", "smithy.api#httpLabel": {}, "smithy.api#required": {}, "smithy.rules#contextParam": { @@ -27603,28 +29854,28 @@ "ContentMD5": { "target": "com.amazonaws.s3#ContentMD5", "traits": { - "smithy.api#documentation": "

The MD5 hash of the request body.

\n

For requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically.

", + "smithy.api#documentation": "

The MD5 hash of the request body.

\n

For requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically.

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "Content-MD5" } }, "ChecksumAlgorithm": { "target": "com.amazonaws.s3#ChecksumAlgorithm", "traits": { - "smithy.api#documentation": "

Indicates the algorithm used to create the checksum for the object when using the SDK. This header will not provide any\n additional functionality if not using the SDK. When sending this header, there must be a corresponding x-amz-checksum or\n x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For more\n information, see Checking object integrity in\n the Amazon S3 User Guide.

\n

If you provide an individual checksum, Amazon S3 ignores any provided\n ChecksumAlgorithm parameter.

", + "smithy.api#documentation": "

Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any\n additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum-algorithm\n or\n x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request.

\n

For the x-amz-checksum-algorithm\n header, replace \n algorithm\n with the supported algorithm from the following list:

  • CRC32
  • CRC32C
  • SHA1
  • SHA256

For more\n information, see Checking object integrity in\n the Amazon S3 User Guide.

\n

If the individual checksum value you provide through x-amz-checksum-algorithm\n doesn't match the checksum algorithm you set through x-amz-sdk-checksum-algorithm, Amazon S3 ignores any provided\n ChecksumAlgorithm parameter and uses the checksum algorithm that matches the provided value in x-amz-checksum-algorithm\n .

\n \n

For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the default checksum algorithm that's used for performance.

\n
", "smithy.api#httpHeader": "x-amz-sdk-checksum-algorithm" } }, "ConfirmRemoveSelfBucketAccess": { "target": "com.amazonaws.s3#ConfirmRemoveSelfBucketAccess", "traits": { - "smithy.api#documentation": "

Set this parameter to true to confirm that you want to remove your permissions to change\n this bucket policy in the future.

", + "smithy.api#documentation": "

Set this parameter to true to confirm that you want to remove your permissions to change\n this bucket policy in the future.

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-confirm-remove-self-bucket-access" } }, "Policy": { "target": "com.amazonaws.s3#Policy", "traits": { - "smithy.api#documentation": "

The bucket policy as a JSON document.

", + "smithy.api#documentation": "

The bucket policy as a JSON document.

\n

For directory buckets, the only IAM action supported in the bucket policy is s3express:CreateSession.

", "smithy.api#httpPayload": {}, "smithy.api#required": {} } @@ -27632,7 +29883,7 @@ "ExpectedBucketOwner": { "target": "com.amazonaws.s3#AccountId", "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

", + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

\n \n

For directory buckets, this header is not supported in this API operation. If you specify this header, the request fails with the HTTP status code \n501 Not Implemented.

\n
", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } } @@ -27654,7 +29905,7 @@ "requestAlgorithmMember": "ChecksumAlgorithm", "requestChecksumRequired": true }, - "smithy.api#documentation": "

Creates a replication configuration or replaces an existing one. For more information,\n see Replication in the Amazon S3 User Guide.

\n

Specify the replication configuration in the request body. In the replication\n configuration, you provide the name of the destination bucket or buckets where you want\n Amazon S3 to replicate objects, the IAM role that Amazon S3 can assume to replicate objects on your\n behalf, and other relevant information. You can invoke this request for a specific\n Amazon Web Services Region by using the \n \n aws:RequestedRegion\n condition key.

\n

A replication configuration must include at least one rule, and can contain a maximum of\n 1,000. Each rule identifies a subset of objects to replicate by filtering the objects in\n the source bucket. To choose additional subsets of objects to replicate, add a rule for\n each subset.

\n

To specify a subset of the objects in the source bucket to apply a replication rule to,\n add the Filter element as a child of the Rule element. You can filter objects based on an\n object key prefix, one or more object tags, or both. When you add the Filter element in the\n configuration, you must also add the following elements:\n DeleteMarkerReplication, Status, and\n Priority.

\n \n

If you are using an earlier version of the replication configuration, Amazon S3 handles\n replication of delete markers differently. For more information, see Backward Compatibility.

\n
\n

For information about enabling versioning on a bucket, see Using Versioning.

\n
\n
Handling Replication of Encrypted Objects
\n
\n

By default, Amazon S3 doesn't replicate objects that are stored at rest using\n server-side encryption with KMS keys. To replicate Amazon Web Services KMS-encrypted objects,\n add the following: SourceSelectionCriteria,\n SseKmsEncryptedObjects, Status,\n EncryptionConfiguration, and ReplicaKmsKeyID. For\n information about replication configuration, see Replicating\n Objects Created with SSE Using KMS keys.

\n

For information on PutBucketReplication errors, see List of\n replication-related error codes\n

\n
\n
Permissions
\n
\n

To create a PutBucketReplication request, you must have\n s3:PutReplicationConfiguration permissions for the bucket.\n \n

\n

By default, a resource owner, in this case the Amazon Web Services account that created the\n bucket, can perform this operation. The resource owner can also grant others\n permissions to perform the operation. For more information about permissions, see\n Specifying Permissions in\n a Policy and Managing Access\n Permissions to Your Amazon S3 Resources.

\n \n

To perform this operation, the user or role performing the action must have\n the iam:PassRole\n permission.

\n
\n
\n
\n

The following operations are related to PutBucketReplication:

\n ", + "smithy.api#documentation": "\n

This operation is not supported by directory buckets.

\n
\n

Creates a replication configuration or replaces an existing one. For more information,\n see Replication in the Amazon S3 User Guide.

\n

Specify the replication configuration in the request body. In the replication\n configuration, you provide the name of the destination bucket or buckets where you want\n Amazon S3 to replicate objects, the IAM role that Amazon S3 can assume to replicate objects on your\n behalf, and other relevant information. You can invoke this request for a specific\n Amazon Web Services Region by using the \n \n aws:RequestedRegion\n condition key.

\n

A replication configuration must include at least one rule, and can contain a maximum of\n 1,000. Each rule identifies a subset of objects to replicate by filtering the objects in\n the source bucket. To choose additional subsets of objects to replicate, add a rule for\n each subset.

\n

To specify a subset of the objects in the source bucket to apply a replication rule to,\n add the Filter element as a child of the Rule element. You can filter objects based on an\n object key prefix, one or more object tags, or both. When you add the Filter element in the\n configuration, you must also add the following elements:\n DeleteMarkerReplication, Status, and\n Priority.

\n \n

If you are using an earlier version of the replication configuration, Amazon S3 handles\n replication of delete markers differently. For more information, see Backward Compatibility.

\n
\n

For information about enabling versioning on a bucket, see Using Versioning.

\n
\n
Handling Replication of Encrypted Objects
\n
\n

By default, Amazon S3 doesn't replicate objects that are stored at rest using\n server-side encryption with KMS keys. To replicate Amazon Web Services KMS-encrypted objects,\n add the following: SourceSelectionCriteria,\n SseKmsEncryptedObjects, Status,\n EncryptionConfiguration, and ReplicaKmsKeyID. For\n information about replication configuration, see Replicating\n Objects Created with SSE Using KMS keys.

\n

For information on PutBucketReplication errors, see List of\n replication-related error codes\n

\n
\n
Permissions
\n
\n

To create a PutBucketReplication request, you must have\n s3:PutReplicationConfiguration permissions for the bucket.\n \n

\n

By default, a resource owner, in this case the Amazon Web Services account that created the\n bucket, can perform this operation. The resource owner can also grant others\n permissions to perform the operation. For more information about permissions, see\n Specifying Permissions in\n a Policy and Managing Access\n Permissions to Your Amazon S3 Resources.

\n \n

To perform this operation, the user or role performing the action must have\n the iam:PassRole\n permission.

\n
\n
\n
\n

The following operations are related to PutBucketReplication:
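A minimal aws-sdk-go-v2 sketch (not part of the model) of the simplest replication configuration described above: one rule in the legacy Prefix form and a destination bucket ARN. The role and bucket ARNs are placeholders, and versioning is assumed to be enabled on both buckets. Imports and client construction as in the first sketch.

// Illustrative only: replicate every object in the source bucket to the destination.
func putReplication(ctx context.Context, client *s3.Client) error {
	_, err := client.PutBucketReplication(ctx, &s3.PutBucketReplicationInput{
		Bucket: aws.String("DOC-EXAMPLE-SOURCE"),
		ReplicationConfiguration: &types.ReplicationConfiguration{
			Role: aws.String("arn:aws:iam::111122223333:role/s3-replication-role"),
			Rules: []types.ReplicationRule{{
				ID:     aws.String("replicate-all"),
				Status: types.ReplicationRuleStatusEnabled,
				Prefix: aws.String(""), // legacy prefix form; newer rules use Filter plus Priority and DeleteMarkerReplication
				Destination: &types.Destination{
					Bucket: aws.String("arn:aws:s3:::DOC-EXAMPLE-DESTINATION"),
				},
			}},
		},
	})
	return err
}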

\n ", "smithy.api#examples": [ { "title": "Set replication configuration on a bucket", @@ -27681,6 +29932,11 @@ "method": "PUT", "uri": "/{Bucket}?replication", "code": 200 + }, + "smithy.rules#staticContextParams": { + "UseS3ExpressControlEndpoint": { + "value": true + } } } }, @@ -27708,7 +29964,7 @@ "ChecksumAlgorithm": { "target": "com.amazonaws.s3#ChecksumAlgorithm", "traits": { - "smithy.api#documentation": "

Indicates the algorithm used to create the checksum for the object when using the SDK. This header will not provide any\n additional functionality if not using the SDK. When sending this header, there must be a corresponding x-amz-checksum or\n x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For more\n information, see Checking object integrity in\n the Amazon S3 User Guide.

\n

If you provide an individual checksum, Amazon S3 ignores any provided\n ChecksumAlgorithm parameter.

", + "smithy.api#documentation": "

Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any\n additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum or\n x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For more\n information, see Checking object integrity in\n the Amazon S3 User Guide.

\n

If you provide an individual checksum, Amazon S3 ignores any provided\n ChecksumAlgorithm parameter.

", "smithy.api#httpHeader": "x-amz-sdk-checksum-algorithm" } }, @@ -27730,7 +29986,7 @@ "ExpectedBucketOwner": { "target": "com.amazonaws.s3#AccountId", "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

", + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } } @@ -27752,7 +30008,7 @@ "requestAlgorithmMember": "ChecksumAlgorithm", "requestChecksumRequired": true }, - "smithy.api#documentation": "

Sets the request payment configuration for a bucket. By default, the bucket owner pays\n for downloads from the bucket. This configuration parameter enables the bucket owner (only)\n to specify that the person requesting the download will be charged for the download. For\n more information, see Requester Pays\n Buckets.

\n

The following operations are related to PutBucketRequestPayment:

\n ", + "smithy.api#documentation": "\n

This operation is not supported by directory buckets.

\n
\n

Sets the request payment configuration for a bucket. By default, the bucket owner pays\n for downloads from the bucket. This configuration parameter enables the bucket owner (only)\n to specify that the person requesting the download will be charged for the download. For\n more information, see Requester Pays\n Buckets.

\n

The following operations are related to PutBucketRequestPayment:
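A minimal aws-sdk-go-v2 sketch (not part of the model) that turns on Requester Pays as described above; the bucket name is a placeholder. Imports and client construction as in the first sketch.

// Illustrative only: bill requesters, not the bucket owner, for downloads.
func enableRequesterPays(ctx context.Context, client *s3.Client) error {
	_, err := client.PutBucketRequestPayment(ctx, &s3.PutBucketRequestPaymentInput{
		Bucket: aws.String("DOC-EXAMPLE-BUCKET"),
		RequestPaymentConfiguration: &types.RequestPaymentConfiguration{
			Payer: types.PayerRequester,
		},
	})
	return err
}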

\n ", "smithy.api#examples": [ { "title": "Set request payment configuration on a bucket.", @@ -27769,6 +30025,11 @@ "method": "PUT", "uri": "/{Bucket}?requestPayment", "code": 200 + }, + "smithy.rules#staticContextParams": { + "UseS3ExpressControlEndpoint": { + "value": true + } } } }, @@ -27796,7 +30057,7 @@ "ChecksumAlgorithm": { "target": "com.amazonaws.s3#ChecksumAlgorithm", "traits": { - "smithy.api#documentation": "

Indicates the algorithm used to create the checksum for the object when using the SDK. This header will not provide any\n additional functionality if not using the SDK. When sending this header, there must be a corresponding x-amz-checksum or\n x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For more\n information, see Checking object integrity in\n the Amazon S3 User Guide.

\n

If you provide an individual checksum, Amazon S3 ignores any provided\n ChecksumAlgorithm parameter.

", + "smithy.api#documentation": "

Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any\n additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum or\n x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For more\n information, see Checking object integrity in\n the Amazon S3 User Guide.

\n

If you provide an individual checksum, Amazon S3 ignores any provided\n ChecksumAlgorithm parameter.

", "smithy.api#httpHeader": "x-amz-sdk-checksum-algorithm" } }, @@ -27812,7 +30073,7 @@ "ExpectedBucketOwner": { "target": "com.amazonaws.s3#AccountId", "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

", + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } } @@ -27834,7 +30095,7 @@ "requestAlgorithmMember": "ChecksumAlgorithm", "requestChecksumRequired": true }, - "smithy.api#documentation": "

Sets the tags for a bucket.

\n

Use tags to organize your Amazon Web Services bill to reflect your own cost structure. To do this,\n sign up to get your Amazon Web Services account bill with tag key values included. Then, to see the cost\n of combined resources, organize your billing information according to resources with the\n same tag key values. For example, you can tag several resources with a specific application\n name, and then organize your billing information to see the total cost of that application\n across several services. For more information, see Cost Allocation and\n Tagging and Using Cost Allocation in Amazon S3\n Bucket Tags.

\n \n

When this operation sets the tags for a bucket, it will overwrite any current tags\n the bucket already has. You cannot use this operation to add tags to an existing list of\n tags.

\n
\n

To use this operation, you must have permissions to perform the\n s3:PutBucketTagging action. The bucket owner has this permission by default\n and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing\n Access Permissions to Your Amazon S3 Resources.

\n

\n PutBucketTagging has the following special errors. For more Amazon S3 errors\n see, Error\n Responses.

\n
    \n
  • \n

    \n InvalidTag - The tag provided was not a valid tag. This error\n can occur if the tag did not pass input validation. For more information, see Using\n Cost Allocation in Amazon S3 Bucket Tags.

    \n
  • \n
  • \n

    \n MalformedXML - The XML provided does not match the\n schema.

    \n
  • \n
  • \n

    \n OperationAborted - A conflicting conditional action is\n currently in progress against this resource. Please try again.

    \n
  • \n
  • \n

    \n InternalError - The service was unable to apply the provided\n tag to the bucket.

    \n
  • \n
\n

The following operations are related to PutBucketTagging:

\n ", + "smithy.api#documentation": "\n

This operation is not supported by directory buckets.

\n
\n

Sets the tags for a bucket.

\n

Use tags to organize your Amazon Web Services bill to reflect your own cost structure. To do this,\n sign up to get your Amazon Web Services account bill with tag key values included. Then, to see the cost\n of combined resources, organize your billing information according to resources with the\n same tag key values. For example, you can tag several resources with a specific application\n name, and then organize your billing information to see the total cost of that application\n across several services. For more information, see Cost Allocation and\n Tagging and Using Cost Allocation in Amazon S3\n Bucket Tags.

\n \n

When this operation sets the tags for a bucket, it will overwrite any current tags\n the bucket already has. You cannot use this operation to add tags to an existing list of\n tags.

\n
\n

To use this operation, you must have permissions to perform the\n s3:PutBucketTagging action. The bucket owner has this permission by default\n and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing\n Access Permissions to Your Amazon S3 Resources.

\n

\n PutBucketTagging has the following special errors. For more Amazon S3 errors,\n see Error\n Responses.

\n
    \n
  • \n

    \n InvalidTag - The tag provided was not a valid tag. This error\n can occur if the tag did not pass input validation. For more information, see Using\n Cost Allocation in Amazon S3 Bucket Tags.

    \n
  • \n
  • \n

    \n MalformedXML - The XML provided does not match the\n schema.

    \n
  • \n
  • \n

    \n OperationAborted - A conflicting conditional action is\n currently in progress against this resource. Please try again.

    \n
  • \n
  • \n

    \n InternalError - The service was unable to apply the provided\n tag to the bucket.

    \n
  • \n
\n

The following operations are related to PutBucketTagging:

\n ", "smithy.api#examples": [ { "title": "Set tags on a bucket", @@ -27860,6 +30121,11 @@ "method": "PUT", "uri": "/{Bucket}?tagging", "code": 200 + }, + "smithy.rules#staticContextParams": { + "UseS3ExpressControlEndpoint": { + "value": true + } } } }, @@ -27887,7 +30153,7 @@ "ChecksumAlgorithm": { "target": "com.amazonaws.s3#ChecksumAlgorithm", "traits": { - "smithy.api#documentation": "

Indicates the algorithm used to create the checksum for the object when using the SDK. This header will not provide any\n additional functionality if not using the SDK. When sending this header, there must be a corresponding x-amz-checksum or\n x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For more\n information, see Checking object integrity in\n the Amazon S3 User Guide.

\n

If you provide an individual checksum, Amazon S3 ignores any provided\n ChecksumAlgorithm parameter.

", + "smithy.api#documentation": "

Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any\n additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum or\n x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For more\n information, see Checking object integrity in\n the Amazon S3 User Guide.

\n

If you provide an individual checksum, Amazon S3 ignores any provided\n ChecksumAlgorithm parameter.

", "smithy.api#httpHeader": "x-amz-sdk-checksum-algorithm" } }, @@ -27903,7 +30169,7 @@ "ExpectedBucketOwner": { "target": "com.amazonaws.s3#AccountId", "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

", + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } } @@ -27925,7 +30191,7 @@ "requestAlgorithmMember": "ChecksumAlgorithm", "requestChecksumRequired": true }, - "smithy.api#documentation": "

Sets the versioning state of an existing bucket.

\n

You can set the versioning state with one of the following values:

\n

\n Enabled—Enables versioning for the objects in the\n bucket. All objects added to the bucket receive a unique version ID.

\n

\n Suspended—Disables versioning for the objects in the\n bucket. All objects added to the bucket receive the version ID null.

\n

If the versioning state has never been set on a bucket, it has no versioning state; a\n GetBucketVersioning request does not return a versioning state value.

\n

In order to enable MFA Delete, you must be the bucket owner. If you are the bucket owner\n and want to enable MFA Delete in the bucket versioning configuration, you must include the\n x-amz-mfa request header and the Status and the\n MfaDelete request elements in a request to set the versioning state of the\n bucket.

\n \n

If you have an object expiration lifecycle configuration in your non-versioned bucket\n and you want to maintain the same permanent delete behavior when you enable versioning,\n you must add a noncurrent expiration policy. The noncurrent expiration lifecycle\n configuration will manage the deletes of the noncurrent object versions in the\n version-enabled bucket. (A version-enabled bucket maintains one current and zero or more\n noncurrent object versions.) For more information, see Lifecycle and Versioning.

\n
\n

The following operations are related to PutBucketVersioning:

\n ", + "smithy.api#documentation": "\n

This operation is not supported by directory buckets.

\n
\n

Sets the versioning state of an existing bucket.

\n

You can set the versioning state with one of the following values:

\n

\n Enabled—Enables versioning for the objects in the\n bucket. All objects added to the bucket receive a unique version ID.

\n

\n Suspended—Disables versioning for the objects in the\n bucket. All objects added to the bucket receive the version ID null.

\n

If the versioning state has never been set on a bucket, it has no versioning state; a\n GetBucketVersioning request does not return a versioning state value.

\n

In order to enable MFA Delete, you must be the bucket owner. If you are the bucket owner\n and want to enable MFA Delete in the bucket versioning configuration, you must include the\n x-amz-mfa request header and the Status and the\n MfaDelete request elements in a request to set the versioning state of the\n bucket.

\n \n

If you have an object expiration lifecycle configuration in your non-versioned bucket\n and you want to maintain the same permanent delete behavior when you enable versioning,\n you must add a noncurrent expiration policy. The noncurrent expiration lifecycle\n configuration will manage the deletes of the noncurrent object versions in the\n version-enabled bucket. (A version-enabled bucket maintains one current and zero or more\n noncurrent object versions.) For more information, see Lifecycle and Versioning.

\n
\n

The following operations are related to PutBucketVersioning:

\n ", "smithy.api#examples": [ { "title": "Set versioning configuration on a bucket", @@ -27943,6 +30209,11 @@ "method": "PUT", "uri": "/{Bucket}?versioning", "code": 200 + }, + "smithy.rules#staticContextParams": { + "UseS3ExpressControlEndpoint": { + "value": true + } } } }, @@ -27970,7 +30241,7 @@ "ChecksumAlgorithm": { "target": "com.amazonaws.s3#ChecksumAlgorithm", "traits": { - "smithy.api#documentation": "

Indicates the algorithm used to create the checksum for the object when using the SDK. This header will not provide any\n additional functionality if not using the SDK. When sending this header, there must be a corresponding x-amz-checksum or\n x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For more\n information, see Checking object integrity in\n the Amazon S3 User Guide.

\n

If you provide an individual checksum, Amazon S3 ignores any provided\n ChecksumAlgorithm parameter.

", + "smithy.api#documentation": "

Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any\n additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum or\n x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For more\n information, see Checking object integrity in\n the Amazon S3 User Guide.

\n

If you provide an individual checksum, Amazon S3 ignores any provided\n ChecksumAlgorithm parameter.

", "smithy.api#httpHeader": "x-amz-sdk-checksum-algorithm" } }, @@ -27993,7 +30264,7 @@ "ExpectedBucketOwner": { "target": "com.amazonaws.s3#AccountId", "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

", + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } } @@ -28015,7 +30286,7 @@ "requestAlgorithmMember": "ChecksumAlgorithm", "requestChecksumRequired": true }, - "smithy.api#documentation": "

Sets the configuration of the website that is specified in the website\n subresource. To configure a bucket as a website, you can add this subresource on the bucket\n with website configuration information such as the file name of the index document and any\n redirect rules. For more information, see Hosting Websites on Amazon S3.

\n

This PUT action requires the S3:PutBucketWebsite permission. By default,\n only the bucket owner can configure the website attached to a bucket; however, bucket\n owners can allow other users to set the website configuration by writing a bucket policy\n that grants them the S3:PutBucketWebsite permission.

\n

To redirect all website requests sent to the bucket's website endpoint, you add a\n website configuration with the following elements. Because all requests are sent to another\n website, you don't need to provide index document name for the bucket.

\n
    \n
  • \n

    \n WebsiteConfiguration\n

    \n
  • \n
  • \n

    \n RedirectAllRequestsTo\n

    \n
  • \n
  • \n

    \n HostName\n

    \n
  • \n
  • \n

    \n Protocol\n

    \n
  • \n
\n

If you want granular control over redirects, you can use the following elements to add\n routing rules that describe conditions for redirecting requests and information about the\n redirect destination. In this case, the website configuration must provide an index\n document for the bucket, because some requests might not be redirected.

\n
    \n
  • \n

    \n WebsiteConfiguration\n

    \n
  • \n
  • \n

    \n IndexDocument\n

    \n
  • \n
  • \n

    \n Suffix\n

    \n
  • \n
  • \n

    \n ErrorDocument\n

    \n
  • \n
  • \n

    \n Key\n

    \n
  • \n
  • \n

    \n RoutingRules\n

    \n
  • \n
  • \n

    \n RoutingRule\n

    \n
  • \n
  • \n

    \n Condition\n

    \n
  • \n
  • \n

    \n HttpErrorCodeReturnedEquals\n

    \n
  • \n
  • \n

    \n KeyPrefixEquals\n

    \n
  • \n
  • \n

    \n Redirect\n

    \n
  • \n
  • \n

    \n Protocol\n

    \n
  • \n
  • \n

    \n HostName\n

    \n
  • \n
  • \n

    \n ReplaceKeyPrefixWith\n

    \n
  • \n
  • \n

    \n ReplaceKeyWith\n

    \n
  • \n
  • \n

    \n HttpRedirectCode\n

    \n
  • \n
\n

Amazon S3 has a limitation of 50 routing rules per website configuration. If you require more\n than 50 routing rules, you can use object redirect. For more information, see Configuring an\n Object Redirect in the Amazon S3 User Guide.

\n

The maximum request length is limited to 128 KB.

", + "smithy.api#documentation": "\n

This operation is not supported by directory buckets.

\n
\n

Sets the configuration of the website that is specified in the website\n subresource. To configure a bucket as a website, you can add this subresource on the bucket\n with website configuration information such as the file name of the index document and any\n redirect rules. For more information, see Hosting Websites on Amazon S3.

\n

This PUT action requires the S3:PutBucketWebsite permission. By default,\n only the bucket owner can configure the website attached to a bucket; however, bucket\n owners can allow other users to set the website configuration by writing a bucket policy\n that grants them the S3:PutBucketWebsite permission.

\n

To redirect all website requests sent to the bucket's website endpoint, you add a\n website configuration with the following elements. Because all requests are sent to another\n website, you don't need to provide an index document name for the bucket.

\n
    \n
  • \n

    \n WebsiteConfiguration\n

    \n
  • \n
  • \n

    \n RedirectAllRequestsTo\n

    \n
  • \n
  • \n

    \n HostName\n

    \n
  • \n
  • \n

    \n Protocol\n

    \n
  • \n
\n

If you want granular control over redirects, you can use the following elements to add\n routing rules that describe conditions for redirecting requests and information about the\n redirect destination. In this case, the website configuration must provide an index\n document for the bucket, because some requests might not be redirected.

\n
    \n
  • \n

    \n WebsiteConfiguration\n

    \n
  • \n
  • \n

    \n IndexDocument\n

    \n
  • \n
  • \n

    \n Suffix\n

    \n
  • \n
  • \n

    \n ErrorDocument\n

    \n
  • \n
  • \n

    \n Key\n

    \n
  • \n
  • \n

    \n RoutingRules\n

    \n
  • \n
  • \n

    \n RoutingRule\n

    \n
  • \n
  • \n

    \n Condition\n

    \n
  • \n
  • \n

    \n HttpErrorCodeReturnedEquals\n

    \n
  • \n
  • \n

    \n KeyPrefixEquals\n

    \n
  • \n
  • \n

    \n Redirect\n

    \n
  • \n
  • \n

    \n Protocol\n

    \n
  • \n
  • \n

    \n HostName\n

    \n
  • \n
  • \n

    \n ReplaceKeyPrefixWith\n

    \n
  • \n
  • \n

    \n ReplaceKeyWith\n

    \n
  • \n
  • \n

    \n HttpRedirectCode\n

    \n
  • \n
\n

Amazon S3 has a limitation of 50 routing rules per website configuration. If you require more\n than 50 routing rules, you can use object redirect. For more information, see Configuring an\n Object Redirect in the Amazon S3 User Guide.

\n

The maximum request length is limited to 128 KB.

", "smithy.api#examples": [ { "title": "Set website configuration on a bucket", @@ -28038,6 +30309,11 @@ "method": "PUT", "uri": "/{Bucket}?website", "code": 200 + }, + "smithy.rules#staticContextParams": { + "UseS3ExpressControlEndpoint": { + "value": true + } } } }, @@ -28065,7 +30341,7 @@ "ChecksumAlgorithm": { "target": "com.amazonaws.s3#ChecksumAlgorithm", "traits": { - "smithy.api#documentation": "

Indicates the algorithm used to create the checksum for the object when using the SDK. This header will not provide any\n additional functionality if not using the SDK. When sending this header, there must be a corresponding x-amz-checksum or\n x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For more\n information, see Checking object integrity in\n the Amazon S3 User Guide.

\n

If you provide an individual checksum, Amazon S3 ignores any provided\n ChecksumAlgorithm parameter.

", + "smithy.api#documentation": "

Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any\n additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum or\n x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For more\n information, see Checking object integrity in\n the Amazon S3 User Guide.

\n

If you provide an individual checksum, Amazon S3 ignores any provided\n ChecksumAlgorithm parameter.

", "smithy.api#httpHeader": "x-amz-sdk-checksum-algorithm" } }, @@ -28081,7 +30357,7 @@ "ExpectedBucketOwner": { "target": "com.amazonaws.s3#AccountId", "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

", + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } } @@ -28102,7 +30378,7 @@ "aws.protocols#httpChecksum": { "requestAlgorithmMember": "ChecksumAlgorithm" }, - "smithy.api#documentation": "

Adds an object to a bucket. You must have WRITE permissions on a bucket to add an object\n to it.

\n \n

Amazon S3 never adds partial objects; if you receive a success response, Amazon S3 added the\n entire object to the bucket. You cannot use PutObject to only update a\n single piece of metadata for an existing object. You must put the entire object with\n updated metadata if you want to update some values.

\n
\n

Amazon S3 is a distributed system. If it receives multiple write requests for the same object\n simultaneously, it overwrites all but the last object written. To prevent objects from\n being deleted or overwritten, you can use Amazon S3 Object\n Lock.

\n

To ensure that data is not corrupted traversing the network, use the\n Content-MD5 header. When you use this header, Amazon S3 checks the object\n against the provided MD5 value and, if they do not match, returns an error. Additionally,\n you can calculate the MD5 while putting an object to Amazon S3 and compare the returned ETag to\n the calculated MD5 value.

\n \n
    \n
  • \n

    To successfully complete the PutObject request, you must have the\n s3:PutObject in your IAM permissions.

    \n
  • \n
  • \n

    To successfully change the objects acl of your PutObject request,\n you must have the s3:PutObjectAcl in your IAM permissions.

    \n
  • \n
  • \n

    To successfully set the tag-set with your PutObject request, you\n must have the s3:PutObjectTagging in your IAM permissions.

    \n
  • \n
  • \n

    The Content-MD5 header is required for any request to upload an\n object with a retention period configured using Amazon S3 Object Lock. For more\n information about Amazon S3 Object Lock, see Amazon S3 Object Lock\n Overview in the Amazon S3 User Guide.

    \n
  • \n
\n
\n

You have four mutually exclusive options to protect data using server-side encryption in\n Amazon S3, depending on how you choose to manage the encryption keys. Specifically, the\n encryption key options are Amazon S3 managed keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS or\n DSSE-KMS), and customer-provided keys (SSE-C). Amazon S3 encrypts data with server-side\n encryption by using Amazon S3 managed keys (SSE-S3) by default. You can optionally tell Amazon S3 to\n encrypt data at rest by using server-side encryption with other key options. For more\n information, see Using Server-Side\n Encryption.

\n

When adding a new object, you can use headers to grant ACL-based permissions to\n individual Amazon Web Services accounts or to predefined groups defined by Amazon S3. These permissions are\n then added to the ACL on the object. By default, all objects are private. Only the owner\n has full access control. For more information, see Access Control List (ACL) Overview\n and Managing\n ACLs Using the REST API.

\n

If the bucket that you're uploading objects to uses the bucket owner enforced setting\n for S3 Object Ownership, ACLs are disabled and no longer affect permissions. Buckets that\n use this setting only accept PUT requests that don't specify an ACL or PUT requests that\n specify bucket owner full control ACLs, such as the bucket-owner-full-control\n canned ACL or an equivalent form of this ACL expressed in the XML format. PUT requests that\n contain other ACLs (for example, custom grants to certain Amazon Web Services accounts) fail and return a\n 400 error with the error code AccessControlListNotSupported.\n For more information, see Controlling ownership of\n objects and disabling ACLs in the Amazon S3 User Guide.

\n \n

If your bucket uses the bucket owner enforced setting for Object Ownership, all\n objects written to the bucket by any account will be owned by the bucket owner.

\n
\n

By default, Amazon S3 uses the STANDARD Storage Class to store newly created objects. The\n STANDARD storage class provides high durability and high availability. Depending on\n performance needs, you can specify a different Storage Class. Amazon S3 on Outposts only uses\n the OUTPOSTS Storage Class. For more information, see Storage Classes in the\n Amazon S3 User Guide.

\n

If you enable versioning for a bucket, Amazon S3 automatically generates a unique version ID\n for the object being stored. Amazon S3 returns this ID in the response. When you enable\n versioning for a bucket, if Amazon S3 receives multiple write requests for the same object\n simultaneously, it stores all of the objects. For more information about versioning, see\n Adding Objects to\n Versioning-Enabled Buckets. For information about returning the versioning state\n of a bucket, see GetBucketVersioning.

\n

For more information about related Amazon S3 APIs, see the following:

\n ", + "smithy.api#documentation": "

Adds an object to a bucket.

\n \n
    \n
  • \n

    Amazon S3 never adds partial objects; if you receive a success response, Amazon S3 added the\n entire object to the bucket. You cannot use PutObject to only update a\n single piece of metadata for an existing object. You must put the entire object with\n updated metadata if you want to update some values.

    \n
  • \n
  • \n

    If your bucket uses the bucket owner enforced setting for Object Ownership, ACLs are disabled and no longer affect permissions. All\n objects written to the bucket by any account will be owned by the bucket owner.

    \n
  • \n
  • \n

    \n Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name\n . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the\n Amazon S3 User Guide.

    \n
  • \n
\n
\n

Amazon S3 is a distributed system. If it receives multiple write requests for the same object\n simultaneously, it overwrites all but the last object written. However, Amazon S3 provides features that can modify this behavior:

\n
    \n
  • \n

    \n S3 Object Lock - To prevent objects from\n being deleted or overwritten, you can use S3 Object Lock. For more information, see Amazon S3 Object\n Lock in the Amazon S3 User Guide.

    \n \n

    This functionality is not supported for directory buckets.

    \n
    \n
  • \n
  • \n

    \n S3 Versioning - When you enable\n versioning for a bucket, if Amazon S3 receives multiple write requests for the same object\n simultaneously, it stores all versions of the objects. For each write request that is made to the same object, Amazon S3 automatically generates a unique version ID\n of that object being stored in Amazon S3. \n You can retrieve, replace, or delete any version of the object. For more information about versioning, see\n Adding Objects to\n Versioning-Enabled Buckets in the Amazon S3\n User Guide. For information about returning the versioning state\n of a bucket, see GetBucketVersioning.

    \n \n

    This functionality is not supported for directory buckets.

    \n
    \n
  • \n
\n
\n
Permissions
\n
\n
    \n
  • \n

    \n General purpose bucket permissions - The following permissions are required in your policies when your \n PutObject request includes specific headers.

    \n
      \n
    • \n

      \n \n s3:PutObject\n - To successfully complete the PutObject request, you must always have the s3:PutObject permission on a bucket to add an object\n to it.

      \n
    • \n
    • \n

      \n \n s3:PutObjectAcl\n - To successfully change the object's ACL of your PutObject request, you must have the s3:PutObjectAcl permission.

      \n
    • \n
    • \n

      \n \n s3:PutObjectTagging\n - To successfully set the tag-set with your PutObject request, you\n must have the s3:PutObjectTagging permission.

      \n
    • \n
    \n
  • \n
  • \n

    \n Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the \n CreateSession\n API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. \nThe Amazon Web Services CLI and SDKs create a session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see \n CreateSession\n .

    \n
  • \n
\n
\n
Data integrity with Content-MD5
\n
\n
    \n
  • \n

    \n General purpose bucket - To ensure that data is not corrupted traversing the network, use the\n Content-MD5 header. When you use this header, Amazon S3 checks the object\n against the provided MD5 value and, if they do not match, Amazon S3 returns an error. Alternatively, when the object's ETag is its MD5 digest, \n you can calculate the MD5 while putting the object to Amazon S3 and compare the returned ETag to\n the calculated MD5 value.

    \n
  • \n
  • \n

    \n Directory bucket - This functionality is not supported for directory buckets.

    \n
  • \n
\n
\n
HTTP Host header syntax
\n
\n

\n Directory buckets - The HTTP Host header syntax is \n Bucket_name.s3express-az_id.region.amazonaws.com.

\n
\n
\n

For more information about related Amazon S3 APIs, see the following:

\n ", "smithy.api#examples": [ { "title": "To upload an object and specify optional tags", @@ -28144,7 +30420,7 @@ "requestAlgorithmMember": "ChecksumAlgorithm", "requestChecksumRequired": true }, - "smithy.api#documentation": "

Uses the acl subresource to set the access control list (ACL) permissions\n for a new or existing object in an S3 bucket. You must have WRITE_ACP\n permission to set the ACL of an object. For more information, see What\n permissions can I grant? in the Amazon S3 User Guide.

\n

This action is not supported by Amazon S3 on Outposts.

\n

Depending on your application needs, you can choose to set the ACL on an object using\n either the request body or the headers. For example, if you have an existing application\n that updates a bucket ACL using the request body, you can continue to use that approach.\n For more information, see Access Control List (ACL) Overview\n in the Amazon S3 User Guide.

\n \n

If your bucket uses the bucket owner enforced setting for S3 Object Ownership, ACLs\n are disabled and no longer affect permissions. You must use policies to grant access to\n your bucket and the objects in it. Requests to set ACLs or update ACLs fail and return\n the AccessControlListNotSupported error code. Requests to read ACLs are\n still supported. For more information, see Controlling object\n ownership in the Amazon S3 User Guide.

\n
\n
\n
Permissions
\n
\n

You can set access permissions using one of the following methods:

\n
    \n
  • \n

    Specify a canned ACL with the x-amz-acl request header. Amazon S3\n supports a set of predefined ACLs, known as canned ACLs. Each canned ACL has\n a predefined set of grantees and permissions. Specify the canned ACL name as\n the value of x-amz-acl. If you use this header, you cannot use\n other access control-specific headers in your request. For more information,\n see Canned\n ACL.

    \n
  • \n
  • \n

    Specify access permissions explicitly with the\n x-amz-grant-read, x-amz-grant-read-acp,\n x-amz-grant-write-acp, and\n x-amz-grant-full-control headers. When using these headers,\n you specify explicit access permissions and grantees (Amazon Web Services accounts or Amazon S3\n groups) who will receive the permission. If you use these ACL-specific\n headers, you cannot use x-amz-acl header to set a canned ACL.\n These parameters map to the set of permissions that Amazon S3 supports in an ACL.\n For more information, see Access Control List (ACL)\n Overview.

    \n

    You specify each grantee as a type=value pair, where the type is one of\n the following:

    \n
      \n
    • \n

      \n id – if the value specified is the canonical user ID\n of an Amazon Web Services account

      \n
    • \n
    • \n

      \n uri – if you are granting permissions to a predefined\n group

      \n
    • \n
    • \n

      \n emailAddress – if the value specified is the email\n address of an Amazon Web Services account

      \n \n

      Using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:

      \n
        \n
      • \n

        US East (N. Virginia)

        \n
      • \n
      • \n

        US West (N. California)

        \n
      • \n
      • \n

        US West (Oregon)

        \n
      • \n
      • \n

        Asia Pacific (Singapore)

        \n
      • \n
      • \n

        Asia Pacific (Sydney)

        \n
      • \n
      • \n

        Asia Pacific (Tokyo)

        \n
      • \n
      • \n

        Europe (Ireland)

        \n
      • \n
      • \n

        South America (São Paulo)

        \n
      • \n
      \n

      For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.

      \n
      \n
    • \n
    \n

    For example, the following x-amz-grant-read header grants\n list objects permission to the two Amazon Web Services accounts identified by their email\n addresses.

    \n

    \n x-amz-grant-read: emailAddress=\"xyz@amazon.com\",\n emailAddress=\"abc@amazon.com\" \n

    \n
  • \n
\n

You can use either a canned ACL or specify access permissions explicitly. You\n cannot do both.

\n
\n
Grantee Values
\n
\n

You can specify the person (grantee) to whom you're assigning access rights\n (using request elements) in the following ways:

\n
    \n
  • \n

    By the person's ID:

    \n

    \n <>ID<><>GranteesEmail<>\n \n

    \n

    DisplayName is optional and ignored in the request.

    \n
  • \n
  • \n

    By URI:

    \n

    \n <>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<>\n

    \n
  • \n
  • \n

    By Email address:

    \n

    \n <>Grantees@email.com<>lt;/Grantee>\n

    \n

    The grantee is resolved to the CanonicalUser and, in a response to a GET\n Object acl request, appears as the CanonicalUser.

    \n \n

    Using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:

    \n
      \n
    • \n

      US East (N. Virginia)

      \n
    • \n
    • \n

      US West (N. California)

      \n
    • \n
    • \n

      US West (Oregon)

      \n
    • \n
    • \n

      Asia Pacific (Singapore)

      \n
    • \n
    • \n

      Asia Pacific (Sydney)

      \n
    • \n
    • \n

      Asia Pacific (Tokyo)

      \n
    • \n
    • \n

      Europe (Ireland)

      \n
    • \n
    • \n

      South America (São Paulo)

      \n
    • \n
    \n

    For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.

    \n
    \n
  • \n
\n
\n
Versioning
\n
\n

The ACL of an object is set at the object version level. By default, PUT sets\n the ACL of the current version of an object. To set the ACL of a different\n version, use the versionId subresource.

\n
\n
\n

The following operations are related to PutObjectAcl:

\n ", + "smithy.api#documentation": "\n

This operation is not supported by directory buckets.

\n
\n

Uses the acl subresource to set the access control list (ACL) permissions\n for a new or existing object in an S3 bucket. You must have the WRITE_ACP\n permission to set the ACL of an object. For more information, see What\n permissions can I grant? in the Amazon S3 User Guide.

\n

This functionality is not supported for Amazon S3 on Outposts.

\n

Depending on your application needs, you can choose to set the ACL on an object using\n either the request body or the headers. For example, if you have an existing application\n that updates a bucket ACL using the request body, you can continue to use that approach.\n For more information, see Access Control List (ACL) Overview\n in the Amazon S3 User Guide.

\n \n

If your bucket uses the bucket owner enforced setting for S3 Object Ownership, ACLs\n are disabled and no longer affect permissions. You must use policies to grant access to\n your bucket and the objects in it. Requests to set ACLs or update ACLs fail and return\n the AccessControlListNotSupported error code. Requests to read ACLs are\n still supported. For more information, see Controlling object\n ownership in the Amazon S3 User Guide.

\n
\n
\n
Permissions
\n
\n

You can set access permissions using one of the following methods:

\n
    \n
  • \n

    Specify a canned ACL with the x-amz-acl request header. Amazon S3\n supports a set of predefined ACLs, known as canned ACLs. Each canned ACL has\n a predefined set of grantees and permissions. Specify the canned ACL name as\n the value of x-amz-acl. If you use this header, you cannot use\n other access control-specific headers in your request. For more information,\n see Canned\n ACL.

    \n
  • \n
  • \n

    Specify access permissions explicitly with the\n x-amz-grant-read, x-amz-grant-read-acp,\n x-amz-grant-write-acp, and\n x-amz-grant-full-control headers. When using these headers,\n you specify explicit access permissions and grantees (Amazon Web Services accounts or Amazon S3\n groups) who will receive the permission. If you use these ACL-specific\n headers, you cannot use x-amz-acl header to set a canned ACL.\n These parameters map to the set of permissions that Amazon S3 supports in an ACL.\n For more information, see Access Control List (ACL)\n Overview.

    \n

    You specify each grantee as a type=value pair, where the type is one of\n the following:

    \n
      \n
    • \n

      \n id – if the value specified is the canonical user ID\n of an Amazon Web Services account

      \n
    • \n
    • \n

      \n uri – if you are granting permissions to a predefined\n group

      \n
    • \n
    • \n

      \n emailAddress – if the value specified is the email\n address of an Amazon Web Services account

      \n \n

      Using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:

      \n
        \n
      • \n

        US East (N. Virginia)

        \n
      • \n
      • \n

        US West (N. California)

        \n
      • \n
      • \n

        US West (Oregon)

        \n
      • \n
      • \n

        Asia Pacific (Singapore)

        \n
      • \n
      • \n

        Asia Pacific (Sydney)

        \n
      • \n
      • \n

        Asia Pacific (Tokyo)

        \n
      • \n
      • \n

        Europe (Ireland)

        \n
      • \n
      • \n

        South America (São Paulo)

        \n
      • \n
      \n

      For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.

      \n
      \n
    • \n
    \n

    For example, the following x-amz-grant-read header grants\n list objects permission to the two Amazon Web Services accounts identified by their email\n addresses.

    \n

    \n x-amz-grant-read: emailAddress=\"xyz@amazon.com\",\n emailAddress=\"abc@amazon.com\" \n

    \n
  • \n
\n

You can use either a canned ACL or specify access permissions explicitly. You\n cannot do both.

\n
\n
Grantee Values
\n
\n

You can specify the person (grantee) to whom you're assigning access rights\n (using request elements) in the following ways:

\n
    \n
  • \n

    By the person's ID:

    \n

    \n <>ID<><>GranteesEmail<>\n \n

    \n

    DisplayName is optional and ignored in the request.

    \n
  • \n
  • \n

    By URI:

    \n

    \n <>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<>\n

    \n
  • \n
  • \n

    By Email address:

    \n

    \n <>Grantees@email.com<></Grantee>\n

    \n

    The grantee is resolved to the CanonicalUser and, in a response to a GET\n Object acl request, appears as the CanonicalUser.

    \n \n

    Using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:

    \n
      \n
    • \n

      US East (N. Virginia)

      \n
    • \n
    • \n

      US West (N. California)

      \n
    • \n
    • \n

      US West (Oregon)

      \n
    • \n
    • \n

      Asia Pacific (Singapore)

      \n
    • \n
    • \n

      Asia Pacific (Sydney)

      \n
    • \n
    • \n

      Asia Pacific (Tokyo)

      \n
    • \n
    • \n

      Europe (Ireland)

      \n
    • \n
    • \n

      South America (São Paulo)

      \n
    • \n
    \n

    For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.

    \n
    \n
  • \n
\n
\n
Versioning
\n
\n

The ACL of an object is set at the object version level. By default, PUT sets\n the ACL of the current version of an object. To set the ACL of a different\n version, use the versionId subresource.

\n
\n
\n

The following operations are related to PutObjectAcl:

\n ", "smithy.api#examples": [ { "title": "To grant permissions using object ACL", @@ -28201,7 +30477,7 @@ "Bucket": { "target": "com.amazonaws.s3#BucketName", "traits": { - "smithy.api#documentation": "

The bucket name that contains the object to which you want to attach the ACL.

\n

When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.

", + "smithy.api#documentation": "

The bucket name that contains the object to which you want to attach the ACL.

\n

\n Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.

\n

\n S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form \n AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide.

", "smithy.api#httpLabel": {}, "smithy.api#required": {}, "smithy.rules#contextParam": { @@ -28219,28 +30495,28 @@ "ChecksumAlgorithm": { "target": "com.amazonaws.s3#ChecksumAlgorithm", "traits": { - "smithy.api#documentation": "

Indicates the algorithm used to create the checksum for the object when using the SDK. This header will not provide any\n additional functionality if not using the SDK. When sending this header, there must be a corresponding x-amz-checksum or\n x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For more\n information, see Checking object integrity in\n the Amazon S3 User Guide.

\n

If you provide an individual checksum, Amazon S3 ignores any provided\n ChecksumAlgorithm parameter.

", + "smithy.api#documentation": "

Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any\n additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum or\n x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For more\n information, see Checking object integrity in\n the Amazon S3 User Guide.

\n

If you provide an individual checksum, Amazon S3 ignores any provided\n ChecksumAlgorithm parameter.

", "smithy.api#httpHeader": "x-amz-sdk-checksum-algorithm" } }, "GrantFullControl": { "target": "com.amazonaws.s3#GrantFullControl", "traits": { - "smithy.api#documentation": "

Allows grantee the read, write, read ACP, and write ACP permissions on the\n bucket.

\n

This action is not supported by Amazon S3 on Outposts.

", + "smithy.api#documentation": "

Allows grantee the read, write, read ACP, and write ACP permissions on the\n bucket.

\n

This functionality is not supported for Amazon S3 on Outposts.

", "smithy.api#httpHeader": "x-amz-grant-full-control" } }, "GrantRead": { "target": "com.amazonaws.s3#GrantRead", "traits": { - "smithy.api#documentation": "

Allows grantee to list the objects in the bucket.

\n

This action is not supported by Amazon S3 on Outposts.

", + "smithy.api#documentation": "

Allows grantee to list the objects in the bucket.

\n

This functionality is not supported for Amazon S3 on Outposts.

", "smithy.api#httpHeader": "x-amz-grant-read" } }, "GrantReadACP": { "target": "com.amazonaws.s3#GrantReadACP", "traits": { - "smithy.api#documentation": "

Allows grantee to read the bucket ACL.

\n

This action is not supported by Amazon S3 on Outposts.

", + "smithy.api#documentation": "

Allows grantee to read the bucket ACL.

\n

This functionality is not supported for Amazon S3 on Outposts.

", "smithy.api#httpHeader": "x-amz-grant-read-acp" } }, @@ -28254,14 +30530,14 @@ "GrantWriteACP": { "target": "com.amazonaws.s3#GrantWriteACP", "traits": { - "smithy.api#documentation": "

Allows grantee to write the ACL for the applicable bucket.

\n

This action is not supported by Amazon S3 on Outposts.

", + "smithy.api#documentation": "

Allows grantee to write the ACL for the applicable bucket.

\n

This functionality is not supported for Amazon S3 on Outposts.

", "smithy.api#httpHeader": "x-amz-grant-write-acp" } }, "Key": { "target": "com.amazonaws.s3#ObjectKey", "traits": { - "smithy.api#documentation": "

Key for which the PUT action was initiated.

\n

When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.

\n

When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form \n AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide.

", + "smithy.api#documentation": "

Key for which the PUT action was initiated.

", "smithy.api#httpLabel": {}, "smithy.api#required": {}, "smithy.rules#contextParam": { @@ -28278,14 +30554,14 @@ "VersionId": { "target": "com.amazonaws.s3#ObjectVersionId", "traits": { - "smithy.api#documentation": "

VersionId used to reference a specific version of the object.

", + "smithy.api#documentation": "

Version ID used to reference a specific version of the object.

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpQuery": "versionId" } }, "ExpectedBucketOwner": { "target": "com.amazonaws.s3#AccountId", "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

", + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } } @@ -28307,7 +30583,7 @@ "requestAlgorithmMember": "ChecksumAlgorithm", "requestChecksumRequired": true }, - "smithy.api#documentation": "

Applies a legal hold configuration to the specified object. For more information, see\n Locking\n Objects.

\n

This action is not supported by Amazon S3 on Outposts.

", + "smithy.api#documentation": "\n

This operation is not supported by directory buckets.

\n
\n

Applies a legal hold configuration to the specified object. For more information, see\n Locking\n Objects.

\n

This functionality is not supported for Amazon S3 on Outposts.

", "smithy.api#http": { "method": "PUT", "uri": "/{Bucket}/{Key+}?legal-hold", @@ -28335,7 +30611,7 @@ "Bucket": { "target": "com.amazonaws.s3#BucketName", "traits": { - "smithy.api#documentation": "

The bucket name containing the object that you want to place a legal hold on.

\n

When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.

", + "smithy.api#documentation": "

The bucket name containing the object that you want to place a legal hold on.

\n

\n Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.

", "smithy.api#httpLabel": {}, "smithy.api#required": {}, "smithy.rules#contextParam": { @@ -28382,14 +30658,14 @@ "ChecksumAlgorithm": { "target": "com.amazonaws.s3#ChecksumAlgorithm", "traits": { - "smithy.api#documentation": "

Indicates the algorithm used to create the checksum for the object when using the SDK. This header will not provide any\n additional functionality if not using the SDK. When sending this header, there must be a corresponding x-amz-checksum or\n x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For more\n information, see Checking object integrity in\n the Amazon S3 User Guide.

\n

If you provide an individual checksum, Amazon S3 ignores any provided\n ChecksumAlgorithm parameter.

", + "smithy.api#documentation": "

Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any\n additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum or\n x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For more\n information, see Checking object integrity in\n the Amazon S3 User Guide.

\n

If you provide an individual checksum, Amazon S3 ignores any provided\n ChecksumAlgorithm parameter.

", "smithy.api#httpHeader": "x-amz-sdk-checksum-algorithm" } }, "ExpectedBucketOwner": { "target": "com.amazonaws.s3#AccountId", "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

", + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } } @@ -28411,7 +30687,7 @@ "requestAlgorithmMember": "ChecksumAlgorithm", "requestChecksumRequired": true }, - "smithy.api#documentation": "

Places an Object Lock configuration on the specified bucket. The rule specified in the\n Object Lock configuration will be applied by default to every new object placed in the\n specified bucket. For more information, see Locking Objects.

\n \n
    \n
  • \n

    The DefaultRetention settings require both a mode and a\n period.

    \n
  • \n
  • \n

    The DefaultRetention period can be either Days or\n Years but you must select one. You cannot specify\n Days and Years at the same time.

    \n
  • \n
  • \n

    You can enable Object Lock for new or existing buckets. For more\n information, see Configuring Object\n Lock.

    \n
  • \n
\n
", + "smithy.api#documentation": "\n

This operation is not supported by directory buckets.

\n
\n

Places an Object Lock configuration on the specified bucket. The rule specified in the\n Object Lock configuration will be applied by default to every new object placed in the\n specified bucket. For more information, see Locking Objects.

\n \n
    \n
  • \n

    The DefaultRetention settings require both a mode and a\n period.

    \n
  • \n
  • \n

    The DefaultRetention period can be either Days or\n Years but you must select one. You cannot specify\n Days and Years at the same time.

    \n
  • \n
  • \n

    You can enable Object Lock for new or existing buckets. For more\n information, see Configuring Object\n Lock.

    \n
  • \n
\n
", "smithy.api#http": { "method": "PUT", "uri": "/{Bucket}?object-lock", @@ -28478,14 +30754,14 @@ "ChecksumAlgorithm": { "target": "com.amazonaws.s3#ChecksumAlgorithm", "traits": { - "smithy.api#documentation": "

Indicates the algorithm used to create the checksum for the object when using the SDK. This header will not provide any\n additional functionality if not using the SDK. When sending this header, there must be a corresponding x-amz-checksum or\n x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For more\n information, see Checking object integrity in\n the Amazon S3 User Guide.

\n

If you provide an individual checksum, Amazon S3 ignores any provided\n ChecksumAlgorithm parameter.

", + "smithy.api#documentation": "

Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any\n additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum or\n x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For more\n information, see Checking object integrity in\n the Amazon S3 User Guide.

\n

If you provide an individual checksum, Amazon S3 ignores any provided\n ChecksumAlgorithm parameter.

", "smithy.api#httpHeader": "x-amz-sdk-checksum-algorithm" } }, "ExpectedBucketOwner": { "target": "com.amazonaws.s3#AccountId", "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

", + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } } @@ -28500,91 +30776,91 @@ "Expiration": { "target": "com.amazonaws.s3#Expiration", "traits": { - "smithy.api#documentation": "

If the expiration is configured for the object (see PutBucketLifecycleConfiguration), the response includes this header. It\n includes the expiry-date and rule-id key-value pairs that provide\n information about object expiration. The value of the rule-id is\n URL-encoded.

", + "smithy.api#documentation": "

If the expiration is configured for the object (see PutBucketLifecycleConfiguration in the Amazon S3 User Guide), the response includes this header. It\n includes the expiry-date and rule-id key-value pairs that provide\n information about object expiration. The value of the rule-id is\n URL-encoded.

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-expiration" } }, "ETag": { "target": "com.amazonaws.s3#ETag", "traits": { - "smithy.api#documentation": "

Entity tag for the uploaded object.

", + "smithy.api#documentation": "

Entity tag for the uploaded object.

\n

\n General purpose buckets - To ensure that data is not corrupted traversing the network, \n for objects where the \n ETag is the MD5 digest of the object, you can calculate the MD5 while putting an object to Amazon S3 and compare the returned ETag to\n the calculated MD5 value.

\n

\n Directory buckets - The ETag for the object in a directory bucket isn't the MD5 digest of the object.

", "smithy.api#httpHeader": "ETag" } }, "ChecksumCRC32": { "target": "com.amazonaws.s3#ChecksumCRC32", "traits": { - "smithy.api#documentation": "

The base64-encoded, 32-bit CRC32 checksum of the object. This will only be present if it was uploaded\n with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

", + "smithy.api#documentation": "

The base64-encoded, 32-bit CRC32 checksum of the object. This will only be present if it was uploaded\n with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

", "smithy.api#httpHeader": "x-amz-checksum-crc32" } }, "ChecksumCRC32C": { "target": "com.amazonaws.s3#ChecksumCRC32C", "traits": { - "smithy.api#documentation": "

The base64-encoded, 32-bit CRC32C checksum of the object. This will only be present if it was uploaded\n with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

", + "smithy.api#documentation": "

The base64-encoded, 32-bit CRC32C checksum of the object. This will only be present if it was uploaded\n with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

", "smithy.api#httpHeader": "x-amz-checksum-crc32c" } }, "ChecksumSHA1": { "target": "com.amazonaws.s3#ChecksumSHA1", "traits": { - "smithy.api#documentation": "

The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded\n with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

", + "smithy.api#documentation": "

The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded\n with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

", "smithy.api#httpHeader": "x-amz-checksum-sha1" } }, "ChecksumSHA256": { "target": "com.amazonaws.s3#ChecksumSHA256", "traits": { - "smithy.api#documentation": "

The base64-encoded, 256-bit SHA-256 digest of the object. This will only be present if it was uploaded\n with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

", + "smithy.api#documentation": "

The base64-encoded, 256-bit SHA-256 digest of the object. This will only be present if it was uploaded\n with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

", "smithy.api#httpHeader": "x-amz-checksum-sha256" } }, "ServerSideEncryption": { "target": "com.amazonaws.s3#ServerSideEncryption", "traits": { - "smithy.api#documentation": "

The server-side encryption algorithm used when storing this object in Amazon S3 (for example,\n AES256, aws:kms, aws:kms:dsse).

", + "smithy.api#documentation": "

The server-side encryption algorithm used when you store this object in Amazon S3 (for example,\n AES256, aws:kms, aws:kms:dsse).

\n \n

For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) is supported.

\n
", "smithy.api#httpHeader": "x-amz-server-side-encryption" } }, "VersionId": { "target": "com.amazonaws.s3#ObjectVersionId", "traits": { - "smithy.api#documentation": "

Version of the object.

", + "smithy.api#documentation": "

Version ID of the object.

\n

If you enable versioning for a bucket, Amazon S3 automatically generates a unique version ID\n for the object being stored. Amazon S3 returns this ID in the response. When you enable\n versioning for a bucket, if Amazon S3 receives multiple write requests for the same object\n simultaneously, it stores all of the objects. For more information about versioning, see\n Adding Objects to\n Versioning-Enabled Buckets in the Amazon S3\n User Guide. For information about returning the versioning state\n of a bucket, see GetBucketVersioning.

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-version-id" } }, "SSECustomerAlgorithm": { "target": "com.amazonaws.s3#SSECustomerAlgorithm", "traits": { - "smithy.api#documentation": "

If server-side encryption with a customer-provided encryption key was requested, the\n response will include this header confirming the encryption algorithm used.

", + "smithy.api#documentation": "

If server-side encryption with a customer-provided encryption key was requested, the\n response will include this header to confirm the encryption algorithm that's used.

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-server-side-encryption-customer-algorithm" } }, "SSECustomerKeyMD5": { "target": "com.amazonaws.s3#SSECustomerKeyMD5", "traits": { - "smithy.api#documentation": "

If server-side encryption with a customer-provided encryption key was requested, the\n response will include this header to provide round-trip message integrity verification of\n the customer-provided encryption key.

", + "smithy.api#documentation": "

If server-side encryption with a customer-provided encryption key was requested, the\n response will include this header to provide the round-trip message integrity verification of\n the customer-provided encryption key.

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-server-side-encryption-customer-key-MD5" } }, "SSEKMSKeyId": { "target": "com.amazonaws.s3#SSEKMSKeyId", "traits": { - "smithy.api#documentation": "

If x-amz-server-side-encryption has a valid value of aws:kms\n or aws:kms:dsse, this header specifies the ID of the Key Management Service (KMS)\n symmetric encryption customer managed key that was used for the object.

", + "smithy.api#documentation": "

If x-amz-server-side-encryption has a valid value of aws:kms\n or aws:kms:dsse, this header indicates the ID of the Key Management Service (KMS)\n symmetric encryption customer managed key that was used for the object.

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-server-side-encryption-aws-kms-key-id" } }, "SSEKMSEncryptionContext": { "target": "com.amazonaws.s3#SSEKMSEncryptionContext", "traits": { - "smithy.api#documentation": "

If present, specifies the Amazon Web Services KMS Encryption Context to use for object encryption. The\n value of this header is a base64-encoded UTF-8 string holding JSON with the encryption\n context key-value pairs. This value is stored as object metadata and automatically gets\n passed on to Amazon Web Services KMS for future GetObject or CopyObject\n operations on this object.

", + "smithy.api#documentation": "

If present, indicates the Amazon Web Services KMS Encryption Context to use for object encryption. The\n value of this header is a base64-encoded UTF-8 string holding JSON with the encryption\n context key-value pairs. This value is stored as object metadata and automatically gets\n passed on to Amazon Web Services KMS for future GetObject or CopyObject\n operations on this object.

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-server-side-encryption-context" } }, "BucketKeyEnabled": { "target": "com.amazonaws.s3#BucketKeyEnabled", "traits": { - "smithy.api#documentation": "

Indicates whether the uploaded object uses an S3 Bucket Key for server-side encryption\n with Key Management Service (KMS) keys (SSE-KMS).

", + "smithy.api#documentation": "

Indicates whether the uploaded object uses an S3 Bucket Key for server-side encryption\n with Key Management Service (KMS) keys (SSE-KMS).

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-server-side-encryption-bucket-key-enabled" } }, @@ -28605,7 +30881,7 @@ "ACL": { "target": "com.amazonaws.s3#ObjectCannedACL", "traits": { - "smithy.api#documentation": "

The canned ACL to apply to the object. For more information, see Canned\n ACL.

\n

This action is not supported by Amazon S3 on Outposts.

", + "smithy.api#documentation": "

The canned ACL to apply to the object. For more information, see Canned\n ACL in the Amazon S3 User Guide.

\n

When adding a new object, you can use headers to grant ACL-based permissions to\n individual Amazon Web Services accounts or to predefined groups defined by Amazon S3. These permissions are\n then added to the ACL on the object. By default, all objects are private. Only the owner\n has full access control. For more information, see Access Control List (ACL) Overview\n and Managing\n ACLs Using the REST API in the Amazon S3 User Guide.

\n

If the bucket that you're uploading objects to uses the bucket owner enforced setting\n for S3 Object Ownership, ACLs are disabled and no longer affect permissions. Buckets that\n use this setting only accept PUT requests that don't specify an ACL or PUT requests that\n specify bucket owner full control ACLs, such as the bucket-owner-full-control\n canned ACL or an equivalent form of this ACL expressed in the XML format. PUT requests that\n contain other ACLs (for example, custom grants to certain Amazon Web Services accounts) fail and return a\n 400 error with the error code AccessControlListNotSupported.\n For more information, see Controlling ownership of\n objects and disabling ACLs in the Amazon S3 User Guide.

\n \n
    \n
  • \n

    This functionality is not supported for directory buckets.

    \n
  • \n
  • \n

    This functionality is not supported for Amazon S3 on Outposts.

    \n
  • \n
\n
", "smithy.api#httpHeader": "x-amz-acl" } }, @@ -28620,7 +30896,7 @@ "Bucket": { "target": "com.amazonaws.s3#BucketName", "traits": { - "smithy.api#documentation": "

The bucket name to which the PUT action was initiated.

\n

When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.

\n

When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form \n AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide.

", + "smithy.api#documentation": "

The bucket name to which the PUT action was initiated.

\n

\n Directory buckets - When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format \n Bucket_name.s3express-az_id.region.amazonaws.com. Path-style requests are not supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must follow the format \n bucket_base_name--az-id--x-s3 (for example, \n DOC-EXAMPLE-BUCKET--usw2-az2--x-s3). For information about bucket naming\n restrictions, see Directory bucket naming\n rules in the Amazon S3 User Guide.

\n

\n Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.

\n \n

Access points and Object Lambda access points are not supported by directory buckets.

\n
\n

\n S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form \n AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide.

", "smithy.api#httpLabel": {}, "smithy.api#required": {}, "smithy.rules#contextParam": { @@ -28631,7 +30907,7 @@ "CacheControl": { "target": "com.amazonaws.s3#CacheControl", "traits": { - "smithy.api#documentation": "

Can be used to specify caching behavior along the request/reply chain. For more\n information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9.

", + "smithy.api#documentation": "

Can be used to specify caching behavior along the request/reply chain. For more\n information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9.

", "smithy.api#httpHeader": "Cache-Control" } }, @@ -28666,7 +30942,7 @@ "ContentMD5": { "target": "com.amazonaws.s3#ContentMD5", "traits": { - "smithy.api#documentation": "

The base64-encoded 128-bit MD5 digest of the message (without the headers) according to\n RFC 1864. This header can be used as a message integrity check to verify that the data is\n the same data that was originally sent. Although it is optional, we recommend using the\n Content-MD5 mechanism as an end-to-end integrity check. For more information about REST\n request authentication, see REST Authentication.

", + "smithy.api#documentation": "

The base64-encoded 128-bit MD5 digest of the message (without the headers) according to\n RFC 1864. This header can be used as a message integrity check to verify that the data is\n the same data that was originally sent. Although it is optional, we recommend using the\n Content-MD5 mechanism as an end-to-end integrity check. For more information about REST\n request authentication, see REST Authentication.

\n \n

The Content-MD5 header is required for any request to upload an\n object with a retention period configured using Amazon S3 Object Lock. For more\n information about Amazon S3 Object Lock, see Amazon S3 Object Lock\n Overview in the Amazon S3 User Guide.

\n
\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "Content-MD5" } }, @@ -28680,7 +30956,7 @@ "ChecksumAlgorithm": { "target": "com.amazonaws.s3#ChecksumAlgorithm", "traits": { - "smithy.api#documentation": "

Indicates the algorithm used to create the checksum for the object when using the SDK. This header will not provide any\n additional functionality if not using the SDK. When sending this header, there must be a corresponding x-amz-checksum or\n x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For more\n information, see Checking object integrity in\n the Amazon S3 User Guide.

\n

If you provide an individual checksum, Amazon S3 ignores any provided\n ChecksumAlgorithm parameter.

", + "smithy.api#documentation": "

Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any\n additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum-algorithm\n or\n x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request.

\n

For the x-amz-checksum-algorithm\n header, replace \n algorithm\n with the supported algorithm from the following list:

\n
    \n
  • \n

    CRC32

    \n
  • \n
  • \n

    CRC32C

    \n
  • \n
  • \n

    SHA1

    \n
  • \n
  • \n

    SHA256

    \n
  • \n
\n

For more\n information, see Checking object integrity in\n the Amazon S3 User Guide.

\n

If the individual checksum value you provide through x-amz-checksum-algorithm\n doesn't match the checksum algorithm you set through x-amz-sdk-checksum-algorithm, Amazon S3 ignores any provided\n ChecksumAlgorithm parameter and uses the checksum algorithm that matches the provided value in x-amz-checksum-algorithm\n .

\n \n

For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the default checksum algorithm that's used for performance.

\n
", "smithy.api#httpHeader": "x-amz-sdk-checksum-algorithm" } }, @@ -28722,28 +30998,28 @@ "GrantFullControl": { "target": "com.amazonaws.s3#GrantFullControl", "traits": { - "smithy.api#documentation": "

Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.

\n

This action is not supported by Amazon S3 on Outposts.

", + "smithy.api#documentation": "

Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.

\n \n
    \n
  • \n

    This functionality is not supported for directory buckets.

    \n
  • \n
  • \n

    This functionality is not supported for Amazon S3 on Outposts.

    \n
  • \n
\n
", "smithy.api#httpHeader": "x-amz-grant-full-control" } }, "GrantRead": { "target": "com.amazonaws.s3#GrantRead", "traits": { - "smithy.api#documentation": "

Allows grantee to read the object data and its metadata.

\n

This action is not supported by Amazon S3 on Outposts.

", + "smithy.api#documentation": "

Allows grantee to read the object data and its metadata.

\n \n
    \n
  • \n

    This functionality is not supported for directory buckets.

    \n
  • \n
  • \n

    This functionality is not supported for Amazon S3 on Outposts.

    \n
  • \n
\n
", "smithy.api#httpHeader": "x-amz-grant-read" } }, "GrantReadACP": { "target": "com.amazonaws.s3#GrantReadACP", "traits": { - "smithy.api#documentation": "

Allows grantee to read the object ACL.

\n

This action is not supported by Amazon S3 on Outposts.

", + "smithy.api#documentation": "

Allows grantee to read the object ACL.

\n \n
    \n
  • \n

    This functionality is not supported for directory buckets.

    \n
  • \n
  • \n

    This functionality is not supported for Amazon S3 on Outposts.

    \n
  • \n
\n
", "smithy.api#httpHeader": "x-amz-grant-read-acp" } }, "GrantWriteACP": { "target": "com.amazonaws.s3#GrantWriteACP", "traits": { - "smithy.api#documentation": "

Allows grantee to write the ACL for the applicable object.

\n

This action is not supported by Amazon S3 on Outposts.

", + "smithy.api#documentation": "

Allows grantee to write the ACL for the applicable object.

\n \n
    \n
  • \n

    This functionality is not supported for directory buckets.

    \n
  • \n
  • \n

    This functionality is not supported for Amazon S3 on Outposts.

    \n
  • \n
\n
", "smithy.api#httpHeader": "x-amz-grant-write-acp" } }, @@ -28768,63 +31044,63 @@ "ServerSideEncryption": { "target": "com.amazonaws.s3#ServerSideEncryption", "traits": { - "smithy.api#documentation": "

The server-side encryption algorithm used when storing this object in Amazon S3 (for example,\n AES256, aws:kms, aws:kms:dsse).

", + "smithy.api#documentation": "

The server-side encryption algorithm used when you store this object in Amazon S3 (for example,\n AES256, aws:kms, aws:kms:dsse).

\n

\n General purpose buckets - You have four mutually exclusive options to protect data using server-side encryption in\n Amazon S3, depending on how you choose to manage the encryption keys. Specifically, the\n encryption key options are Amazon S3 managed keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS or\n DSSE-KMS), and customer-provided keys (SSE-C). Amazon S3 encrypts data with server-side\n encryption by using Amazon S3 managed keys (SSE-S3) by default. You can optionally tell Amazon S3 to\n encrypt data at rest by using server-side encryption with other key options. For more\n information, see Using Server-Side\n Encryption in the Amazon S3 User Guide.

\n

\n Directory buckets - For directory buckets, only the server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) value is supported.

", "smithy.api#httpHeader": "x-amz-server-side-encryption" } }, "StorageClass": { "target": "com.amazonaws.s3#StorageClass", "traits": { - "smithy.api#documentation": "

By default, Amazon S3 uses the STANDARD Storage Class to store newly created objects. The\n STANDARD storage class provides high durability and high availability. Depending on\n performance needs, you can specify a different Storage Class. Amazon S3 on Outposts only uses\n the OUTPOSTS Storage Class. For more information, see Storage Classes in the\n Amazon S3 User Guide.

", + "smithy.api#documentation": "

By default, Amazon S3 uses the STANDARD Storage Class to store newly created objects. The\n STANDARD storage class provides high durability and high availability. Depending on\n performance needs, you can specify a different Storage Class. For more information, see Storage Classes in the\n Amazon S3 User Guide.

\n \n
    \n
  • \n

    For directory buckets, only the S3 Express One Zone storage class is supported to store newly created objects.

    \n
  • \n
  • \n

    Amazon S3 on Outposts only uses\n the OUTPOSTS Storage Class.

    \n
  • \n
\n
", "smithy.api#httpHeader": "x-amz-storage-class" } }, "WebsiteRedirectLocation": { "target": "com.amazonaws.s3#WebsiteRedirectLocation", "traits": { - "smithy.api#documentation": "

If the bucket is configured as a website, redirects requests for this object to another\n object in the same bucket or to an external URL. Amazon S3 stores the value of this header in\n the object metadata. For information about object metadata, see Object Key and Metadata.

\n

In the following example, the request header sets the redirect to an object\n (anotherPage.html) in the same bucket:

\n

\n x-amz-website-redirect-location: /anotherPage.html\n

\n

In the following example, the request header sets the object redirect to another\n website:

\n

\n x-amz-website-redirect-location: http://www.example.com/\n

\n

For more information about website hosting in Amazon S3, see Hosting Websites on Amazon S3 and\n How to\n Configure Website Page Redirects.

", + "smithy.api#documentation": "

If the bucket is configured as a website, redirects requests for this object to another\n object in the same bucket or to an external URL. Amazon S3 stores the value of this header in\n the object metadata. For information about object metadata, see Object Key and Metadata in the Amazon S3\n User Guide.

\n

In the following example, the request header sets the redirect to an object\n (anotherPage.html) in the same bucket:

\n

\n x-amz-website-redirect-location: /anotherPage.html\n

\n

In the following example, the request header sets the object redirect to another\n website:

\n

\n x-amz-website-redirect-location: http://www.example.com/\n

\n

For more information about website hosting in Amazon S3, see Hosting Websites on Amazon S3 and\n How to\n Configure Website Page Redirects in the Amazon S3\n User Guide.

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-website-redirect-location" } }, "SSECustomerAlgorithm": { "target": "com.amazonaws.s3#SSECustomerAlgorithm", "traits": { - "smithy.api#documentation": "

Specifies the algorithm to use to when encrypting the object (for example,\n AES256).

", + "smithy.api#documentation": "

Specifies the algorithm to use when encrypting the object (for example,\n AES256).

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-server-side-encryption-customer-algorithm" } }, "SSECustomerKey": { "target": "com.amazonaws.s3#SSECustomerKey", "traits": { - "smithy.api#documentation": "

Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This\n value is used to store the object and then it is discarded; Amazon S3 does not store the\n encryption key. The key must be appropriate for use with the algorithm specified in the\n x-amz-server-side-encryption-customer-algorithm header.

", + "smithy.api#documentation": "

Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This\n value is used to store the object and then it is discarded; Amazon S3 does not store the\n encryption key. The key must be appropriate for use with the algorithm specified in the\n x-amz-server-side-encryption-customer-algorithm header.

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-server-side-encryption-customer-key" } }, "SSECustomerKeyMD5": { "target": "com.amazonaws.s3#SSECustomerKeyMD5", "traits": { - "smithy.api#documentation": "

Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses\n this header for a message integrity check to ensure that the encryption key was transmitted\n without error.

", + "smithy.api#documentation": "

Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses\n this header for a message integrity check to ensure that the encryption key was transmitted\n without error.

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-server-side-encryption-customer-key-MD5" } }, "SSEKMSKeyId": { "target": "com.amazonaws.s3#SSEKMSKeyId", "traits": { - "smithy.api#documentation": "

If x-amz-server-side-encryption has a valid value of aws:kms\n or aws:kms:dsse, this header specifies the ID (Key ID, Key ARN, or Key Alias) of the Key Management Service (KMS)\n symmetric encryption customer managed key that was used for the object. If you specify\n x-amz-server-side-encryption:aws:kms or\n x-amz-server-side-encryption:aws:kms:dsse, but do not provide\n x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the Amazon Web Services managed key\n (aws/s3) to protect the data. If the KMS key does not exist in the same\n account that's issuing the command, you must use the full ARN and not just the ID.

", + "smithy.api#documentation": "

If x-amz-server-side-encryption has a valid value of aws:kms\n or aws:kms:dsse, this header specifies the ID (Key ID, Key ARN, or Key Alias) of the Key Management Service (KMS)\n symmetric encryption customer managed key that was used for the object. If you specify\n x-amz-server-side-encryption:aws:kms or\n x-amz-server-side-encryption:aws:kms:dsse, but do not provide\n x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the Amazon Web Services managed key\n (aws/s3) to protect the data. If the KMS key does not exist in the same\n account that's issuing the command, you must use the full ARN and not just the ID.

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-server-side-encryption-aws-kms-key-id" } }, "SSEKMSEncryptionContext": { "target": "com.amazonaws.s3#SSEKMSEncryptionContext", "traits": { - "smithy.api#documentation": "

Specifies the Amazon Web Services KMS Encryption Context to use for object encryption. The value of\n this header is a base64-encoded UTF-8 string holding JSON with the encryption context\n key-value pairs. This value is stored as object metadata and automatically gets passed on\n to Amazon Web Services KMS for future GetObject or CopyObject operations on\n this object. This value must be explicitly added during CopyObject\n operations.

", + "smithy.api#documentation": "

Specifies the Amazon Web Services KMS Encryption Context to use for object encryption. The value of\n this header is a base64-encoded UTF-8 string holding JSON with the encryption context\n key-value pairs. This value is stored as object metadata and automatically gets passed on\n to Amazon Web Services KMS for future GetObject or CopyObject operations on\n this object. This value must be explicitly added during CopyObject operations.

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-server-side-encryption-context" } }, "BucketKeyEnabled": { "target": "com.amazonaws.s3#BucketKeyEnabled", "traits": { - "smithy.api#documentation": "

Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with\n server-side encryption using Key Management Service (KMS) keys (SSE-KMS). Setting this header to\n true causes Amazon S3 to use an S3 Bucket Key for object encryption with\n SSE-KMS.

\n

Specifying this header with a PUT action doesn’t affect bucket-level settings for S3\n Bucket Key.

", + "smithy.api#documentation": "

Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with\n server-side encryption using Key Management Service (KMS) keys (SSE-KMS). Setting this header to\n true causes Amazon S3 to use an S3 Bucket Key for object encryption with\n SSE-KMS.

\n

Specifying this header with a PUT action doesn’t affect bucket-level settings for S3\n Bucket Key.

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-server-side-encryption-bucket-key-enabled" } }, @@ -28837,35 +31113,35 @@ "Tagging": { "target": "com.amazonaws.s3#TaggingHeader", "traits": { - "smithy.api#documentation": "

The tag-set for the object. The tag-set must be encoded as URL Query parameters. (For\n example, \"Key1=Value1\")

", + "smithy.api#documentation": "

The tag-set for the object. The tag-set must be encoded as URL Query parameters. (For\n example, \"Key1=Value1\")

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-tagging" } }, "ObjectLockMode": { "target": "com.amazonaws.s3#ObjectLockMode", "traits": { - "smithy.api#documentation": "

The Object Lock mode that you want to apply to this object.

", + "smithy.api#documentation": "

The Object Lock mode that you want to apply to this object.

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-object-lock-mode" } }, "ObjectLockRetainUntilDate": { "target": "com.amazonaws.s3#ObjectLockRetainUntilDate", "traits": { - "smithy.api#documentation": "

The date and time when you want this object's Object Lock to expire. Must be formatted\n as a timestamp parameter.

", + "smithy.api#documentation": "

The date and time when you want this object's Object Lock to expire. Must be formatted\n as a timestamp parameter.

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-object-lock-retain-until-date" } }, "ObjectLockLegalHoldStatus": { "target": "com.amazonaws.s3#ObjectLockLegalHoldStatus", "traits": { - "smithy.api#documentation": "

Specifies whether a legal hold will be applied to this object. For more information\n about S3 Object Lock, see Object Lock.

", + "smithy.api#documentation": "

Specifies whether a legal hold will be applied to this object. For more information\n about S3 Object Lock, see Object Lock in the Amazon S3 User Guide.

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-object-lock-legal-hold" } }, "ExpectedBucketOwner": { "target": "com.amazonaws.s3#AccountId", "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

", + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } } @@ -28887,7 +31163,7 @@ "requestAlgorithmMember": "ChecksumAlgorithm", "requestChecksumRequired": true }, - "smithy.api#documentation": "

Places an Object Retention configuration on an object. For more information, see Locking Objects.\n Users or accounts require the s3:PutObjectRetention permission in order to\n place an Object Retention configuration on objects. Bypassing a Governance Retention\n configuration requires the s3:BypassGovernanceRetention permission.

\n

This action is not supported by Amazon S3 on Outposts.

", + "smithy.api#documentation": "\n

This operation is not supported by directory buckets.

\n
\n

Places an Object Retention configuration on an object. For more information, see Locking Objects.\n Users or accounts require the s3:PutObjectRetention permission in order to\n place an Object Retention configuration on objects. Bypassing a Governance Retention\n configuration requires the s3:BypassGovernanceRetention permission.

\n

This functionality is not supported for Amazon S3 on Outposts.

", "smithy.api#http": { "method": "PUT", "uri": "/{Bucket}/{Key+}?retention", @@ -28915,7 +31191,7 @@ "Bucket": { "target": "com.amazonaws.s3#BucketName", "traits": { - "smithy.api#documentation": "

The bucket name that contains the object you want to apply this Object Retention\n configuration to.

\n

When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.

", + "smithy.api#documentation": "

The bucket name that contains the object you want to apply this Object Retention\n configuration to.

\n

\n Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.

", "smithy.api#httpLabel": {}, "smithy.api#required": {}, "smithy.rules#contextParam": { @@ -28969,14 +31245,14 @@ "ChecksumAlgorithm": { "target": "com.amazonaws.s3#ChecksumAlgorithm", "traits": { - "smithy.api#documentation": "

Indicates the algorithm used to create the checksum for the object when using the SDK. This header will not provide any\n additional functionality if not using the SDK. When sending this header, there must be a corresponding x-amz-checksum or\n x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For more\n information, see Checking object integrity in\n the Amazon S3 User Guide.

\n

If you provide an individual checksum, Amazon S3 ignores any provided\n ChecksumAlgorithm parameter.

", + "smithy.api#documentation": "

Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any\n additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum or\n x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For more\n information, see Checking object integrity in\n the Amazon S3 User Guide.

\n

If you provide an individual checksum, Amazon S3 ignores any provided\n ChecksumAlgorithm parameter.

", "smithy.api#httpHeader": "x-amz-sdk-checksum-algorithm" } }, "ExpectedBucketOwner": { "target": "com.amazonaws.s3#AccountId", "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

", + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } } @@ -28998,7 +31274,7 @@ "requestAlgorithmMember": "ChecksumAlgorithm", "requestChecksumRequired": true }, - "smithy.api#documentation": "

Sets the supplied tag-set to an object that already exists in a bucket. A tag is a\n key-value pair. For more information, see Object Tagging.

\n

You can associate tags with an object by sending a PUT request against the tagging\n subresource that is associated with the object. You can retrieve tags by sending a GET\n request. For more information, see GetObjectTagging.

\n

For tagging-related restrictions related to characters and encodings, see Tag\n Restrictions. Note that Amazon S3 limits the maximum number of tags to 10 tags per\n object.

\n

To use this operation, you must have permission to perform the\n s3:PutObjectTagging action. By default, the bucket owner has this\n permission and can grant this permission to others.

\n

To put tags of any other version, use the versionId query parameter. You\n also need permission for the s3:PutObjectVersionTagging action.

\n

\n PutObjectTagging has the following special errors. For more Amazon S3 errors\n see, Error\n Responses.

\n
    \n
  • \n

    \n InvalidTag - The tag provided was not a valid tag. This error\n can occur if the tag did not pass input validation. For more information, see Object\n Tagging.

    \n
  • \n
  • \n

    \n MalformedXML - The XML provided does not match the\n schema.

    \n
  • \n
  • \n

    \n OperationAborted - A conflicting conditional action is\n currently in progress against this resource. Please try again.

    \n
  • \n
  • \n

    \n InternalError - The service was unable to apply the provided\n tag to the object.

    \n
  • \n
\n

The following operations are related to PutObjectTagging:

\n ", + "smithy.api#documentation": "\n

This operation is not supported by directory buckets.

\n
\n

Sets the supplied tag-set to an object that already exists in a bucket. A tag is a\n key-value pair. For more information, see Object Tagging.

\n

You can associate tags with an object by sending a PUT request against the tagging\n subresource that is associated with the object. You can retrieve tags by sending a GET\n request. For more information, see GetObjectTagging.

\n

For tagging-related restrictions related to characters and encodings, see Tag\n Restrictions. Note that Amazon S3 limits the maximum number of tags to 10 tags per\n object.

\n

To use this operation, you must have permission to perform the\n s3:PutObjectTagging action. By default, the bucket owner has this\n permission and can grant this permission to others.

\n

To put tags of any other version, use the versionId query parameter. You\n also need permission for the s3:PutObjectVersionTagging action.

\n

\n PutObjectTagging has the following special errors. For more Amazon S3 errors, see Error\n Responses.

\n
    \n
  • \n

    \n InvalidTag - The tag provided was not a valid tag. This error\n can occur if the tag did not pass input validation. For more information, see Object\n Tagging.

    \n
  • \n
  • \n

    \n MalformedXML - The XML provided does not match the\n schema.

    \n
  • \n
  • \n

    \n OperationAborted - A conflicting conditional action is\n currently in progress against this resource. Please try again.

    \n
  • \n
  • \n

    \n InternalError - The service was unable to apply the provided\n tag to the object.

    \n
  • \n
\n

The following operations are related to PutObjectTagging:

\n ", "smithy.api#examples": [ { "title": "To add tags to an existing object", @@ -29052,7 +31328,7 @@ "Bucket": { "target": "com.amazonaws.s3#BucketName", "traits": { - "smithy.api#documentation": "

The bucket name containing the object.

\n

When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.

\n

When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form \n AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide.

", + "smithy.api#documentation": "

The bucket name containing the object.

\n

\n Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.

\n

\n S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form \n AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide.

", "smithy.api#httpLabel": {}, "smithy.api#required": {}, "smithy.rules#contextParam": { @@ -29085,7 +31361,7 @@ "ChecksumAlgorithm": { "target": "com.amazonaws.s3#ChecksumAlgorithm", "traits": { - "smithy.api#documentation": "

Indicates the algorithm used to create the checksum for the object when using the SDK. This header will not provide any\n additional functionality if not using the SDK. When sending this header, there must be a corresponding x-amz-checksum or\n x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For more\n information, see Checking object integrity in\n the Amazon S3 User Guide.

\n

If you provide an individual checksum, Amazon S3 ignores any provided\n ChecksumAlgorithm parameter.

", + "smithy.api#documentation": "

Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any\n additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum or\n x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For more\n information, see Checking object integrity in\n the Amazon S3 User Guide.

\n

If you provide an individual checksum, Amazon S3 ignores any provided\n ChecksumAlgorithm parameter.

", "smithy.api#httpHeader": "x-amz-sdk-checksum-algorithm" } }, @@ -29101,7 +31377,7 @@ "ExpectedBucketOwner": { "target": "com.amazonaws.s3#AccountId", "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

", + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } }, @@ -29129,11 +31405,16 @@ "requestAlgorithmMember": "ChecksumAlgorithm", "requestChecksumRequired": true }, - "smithy.api#documentation": "

Creates or modifies the PublicAccessBlock configuration for an Amazon S3 bucket.\n To use this operation, you must have the s3:PutBucketPublicAccessBlock\n permission. For more information about Amazon S3 permissions, see Specifying Permissions in a\n Policy.

\n \n

When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket or\n an object, it checks the PublicAccessBlock configuration for both the\n bucket (or the bucket that contains the object) and the bucket owner's account. If the\n PublicAccessBlock configurations are different between the bucket and\n the account, Amazon S3 uses the most restrictive combination of the bucket-level and\n account-level settings.

\n
\n

For more information about when Amazon S3 considers a bucket or an object public, see The Meaning of \"Public\".

\n

The following operations are related to PutPublicAccessBlock:

\n ", + "smithy.api#documentation": "\n

This operation is not supported by directory buckets.

\n
\n

Creates or modifies the PublicAccessBlock configuration for an Amazon S3 bucket.\n To use this operation, you must have the s3:PutBucketPublicAccessBlock\n permission. For more information about Amazon S3 permissions, see Specifying Permissions in a\n Policy.

\n \n

When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket or\n an object, it checks the PublicAccessBlock configuration for both the\n bucket (or the bucket that contains the object) and the bucket owner's account. If the\n PublicAccessBlock configurations are different between the bucket and\n the account, Amazon S3 uses the most restrictive combination of the bucket-level and\n account-level settings.

\n
\n

For more information about when Amazon S3 considers a bucket or an object public, see The Meaning of \"Public\".

\n

The following operations are related to PutPublicAccessBlock:

\n ", "smithy.api#http": { "method": "PUT", "uri": "/{Bucket}?publicAccessBlock", "code": 200 + }, + "smithy.rules#staticContextParams": { + "UseS3ExpressControlEndpoint": { + "value": true + } } } }, @@ -29161,7 +31442,7 @@ "ChecksumAlgorithm": { "target": "com.amazonaws.s3#ChecksumAlgorithm", "traits": { - "smithy.api#documentation": "

Indicates the algorithm used to create the checksum for the object when using the SDK. This header will not provide any\n additional functionality if not using the SDK. When sending this header, there must be a corresponding x-amz-checksum or\n x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For more\n information, see Checking object integrity in\n the Amazon S3 User Guide.

\n

If you provide an individual checksum, Amazon S3 ignores any provided\n ChecksumAlgorithm parameter.

", + "smithy.api#documentation": "

Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any\n additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum or\n x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For more\n information, see Checking object integrity in\n the Amazon S3 User Guide.

\n

If you provide an individual checksum, Amazon S3 ignores any provided\n ChecksumAlgorithm parameter.

", "smithy.api#httpHeader": "x-amz-sdk-checksum-algorithm" } }, @@ -29177,7 +31458,7 @@ "ExpectedBucketOwner": { "target": "com.amazonaws.s3#AccountId", "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

", + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } } @@ -29332,6 +31613,15 @@ "smithy.api#documentation": "

Specifies the redirect behavior of all requests to a website endpoint of an Amazon S3\n bucket.

" } }, + "com.amazonaws.s3#Region": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 20 + } + } + }, "com.amazonaws.s3#ReplaceKeyPrefixWith": { "type": "string" }, @@ -29626,7 +31916,7 @@ } }, "traits": { - "smithy.api#documentation": "

If present, indicates that the requester was successfully charged for the\n request.

" + "smithy.api#documentation": "

If present, indicates that the requester was successfully charged for the\n request.

\n \n

This functionality is not supported for directory buckets.

\n
" } }, "com.amazonaws.s3#RequestPayer": { @@ -29640,7 +31930,7 @@ } }, "traits": { - "smithy.api#documentation": "

Confirms that the requester knows that they will be charged for the request. Bucket\n owners need not specify this parameter in their requests. If either the source or\n destination Amazon S3 bucket has Requester Pays enabled, the requester will pay for\n corresponding charges to copy the object. For information about downloading objects from\n Requester Pays buckets, see Downloading Objects in\n Requester Pays Buckets in the Amazon S3 User Guide.

" + "smithy.api#documentation": "

Confirms that the requester knows that they will be charged for the request. Bucket\n owners need not specify this parameter in their requests. If either the source or\n destination S3 bucket has Requester Pays enabled, the requester will pay for\n corresponding charges to copy the object. For information about downloading objects from\n Requester Pays buckets, see Downloading Objects in\n Requester Pays Buckets in the Amazon S3 User Guide.

\n \n

This functionality is not supported for directory buckets.

\n
" } }, "com.amazonaws.s3#RequestPaymentConfiguration": { @@ -29722,7 +32012,7 @@ "aws.protocols#httpChecksum": { "requestAlgorithmMember": "ChecksumAlgorithm" }, - "smithy.api#documentation": "

Restores an archived copy of an object back into Amazon S3

\n

This action is not supported by Amazon S3 on Outposts.

\n

This action performs the following types of requests:

\n
    \n
  • \n

    \n select - Perform a select query on an archived object

    \n
  • \n
  • \n

    \n restore an archive - Restore an archived object

    \n
  • \n
\n

For more information about the S3 structure in the request body, see the\n following:

\n \n

Define the SQL expression for the SELECT type of restoration for your query\n in the request body's SelectParameters structure. You can use expressions like\n the following examples.

\n
    \n
  • \n

    The following expression returns all records from the specified object.

    \n

    \n SELECT * FROM Object\n

    \n
  • \n
  • \n

    Assuming that you are not using any headers for data stored in the object, you can\n specify columns with positional headers.

    \n

    \n SELECT s._1, s._2 FROM Object s WHERE s._3 > 100\n

    \n
  • \n
  • \n

    If you have headers and you set the fileHeaderInfo in the\n CSV structure in the request body to USE, you can\n specify headers in the query. (If you set the fileHeaderInfo field to\n IGNORE, the first row is skipped for the query.) You cannot mix\n ordinal positions with header column names.

    \n

    \n SELECT s.Id, s.FirstName, s.SSN FROM S3Object s\n

    \n
  • \n
\n

When making a select request, you can also do the following:

\n
    \n
  • \n

    To expedite your queries, specify the Expedited tier. For more\n information about tiers, see \"Restoring Archives,\" later in this topic.

    \n
  • \n
  • \n

    Specify details about the data serialization format of both the input object that\n is being queried and the serialization of the CSV-encoded query results.

    \n
  • \n
\n

The following are additional important facts about the select feature:

\n
    \n
  • \n

    The output results are new Amazon S3 objects. Unlike archive retrievals, they are\n stored until explicitly deleted-manually or through a lifecycle configuration.

    \n
  • \n
  • \n

    You can issue more than one select request on the same Amazon S3 object. Amazon S3 doesn't\n duplicate requests, so avoid issuing duplicate requests.

    \n
  • \n
  • \n

    Amazon S3 accepts a select request even if the object has already been restored. A\n select request doesn’t return error response 409.

    \n
  • \n
\n
\n
Permissions
\n
\n

To use this operation, you must have permissions to perform the\n s3:RestoreObject action. The bucket owner has this permission by\n default and can grant this permission to others. For more information about\n permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the\n Amazon S3 User Guide.

\n
\n
Restoring objects
\n
\n

Objects that you archive to the S3 Glacier Flexible Retrieval Flexible Retrieval\n or S3 Glacier Deep Archive storage class, and S3 Intelligent-Tiering Archive or\n S3 Intelligent-Tiering Deep Archive tiers, are not accessible in real time. For objects in the\n S3 Glacier Flexible Retrieval Flexible Retrieval or S3 Glacier Deep Archive\n storage classes, you must first initiate a restore request, and then wait until a\n temporary copy of the object is available. If you want a permanent copy of the\n object, create a copy of it in the Amazon S3 Standard storage class in your S3 bucket.\n To access an archived object, you must restore the object for the duration (number\n of days) that you specify. For objects in the Archive Access or Deep Archive\n Access tiers of S3 Intelligent-Tiering, you must first initiate a restore request,\n and then wait until the object is moved into the Frequent Access tier.

\n

To restore a specific object version, you can provide a version ID. If you\n don't provide a version ID, Amazon S3 restores the current version.

\n

When restoring an archived object, you can specify one of the following data\n access tier options in the Tier element of the request body:

  • Expedited - Expedited retrievals allow you to quickly access your data stored in the S3 Glacier Flexible Retrieval storage class or S3 Intelligent-Tiering Archive tier when occasional urgent requests for restoring archives are required. For all but the largest archived objects (250 MB+), data accessed using Expedited retrievals is typically made available within 1–5 minutes. Provisioned capacity ensures that retrieval capacity for Expedited retrievals is available when you need it. Expedited retrievals and provisioned capacity are not available for objects stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier.

  • Standard - Standard retrievals allow you to access any of your archived objects within several hours. This is the default option for retrieval requests that do not specify the retrieval option. Standard retrievals typically finish within 3–5 hours for objects stored in the S3 Glacier Flexible Retrieval storage class or S3 Intelligent-Tiering Archive tier. They typically finish within 12 hours for objects stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier. Standard retrievals are free for objects stored in S3 Intelligent-Tiering.

  • Bulk - Bulk retrievals are free for objects stored in the S3 Glacier Flexible Retrieval and S3 Intelligent-Tiering storage classes, enabling you to retrieve large amounts, even petabytes, of data at no cost. Bulk retrievals typically finish within 5–12 hours for objects stored in the S3 Glacier Flexible Retrieval storage class or S3 Intelligent-Tiering Archive tier. Bulk retrievals are also the lowest-cost retrieval option when restoring objects from S3 Glacier Deep Archive. They typically finish within 48 hours for objects stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier.

For more information about archive retrieval options and provisioned capacity\n for Expedited data access, see Restoring Archived\n Objects in the Amazon S3 User Guide.

\n

You can use Amazon S3 restore speed upgrade to change the restore speed to a faster\n speed while it is in progress. For more information, see Upgrading the speed of an in-progress restore in the\n Amazon S3 User Guide.

\n

To get the status of object restoration, you can send a HEAD\n request. Operations return the x-amz-restore header, which provides\n information about the restoration status, in the response. You can use Amazon S3 event\n notifications to notify you when a restore is initiated or completed. For more\n information, see Configuring Amazon S3 Event\n Notifications in the Amazon S3 User Guide.

\n

After restoring an archived object, you can update the restoration period by reissuing the request with a new period. Amazon S3 updates the restoration period relative to the current time and charges only for the request; there are no data transfer charges. You cannot update the restoration period when Amazon S3 is actively processing your current restore request for the object.

\n

If your bucket has a lifecycle configuration with a rule that includes an\n expiration action, the object expiration overrides the life span that you specify\n in a restore request. For example, if you restore an object copy for 10 days, but\n the object is scheduled to expire in 3 days, Amazon S3 deletes the object in 3 days.\n For more information about lifecycle configuration, see PutBucketLifecycleConfiguration and Object Lifecycle\n Management in Amazon S3 User Guide.

Responses

A successful action returns either the 200 OK or 202\n Accepted status code.

  • If the object is not previously restored, then Amazon S3 returns 202 Accepted in the response.

  • If the object is previously restored, Amazon S3 returns 200 OK in the response.

  • Special errors:

    • Code: RestoreAlreadyInProgress

      Cause: Object restore is already in progress. (This error does not apply to SELECT type requests.)

      HTTP Status Code: 409 Conflict

      SOAP Fault Code Prefix: Client

    • Code: GlacierExpeditedRetrievalNotAvailable

      Cause: Expedited retrievals are currently not available. Try again later. (Returned if there is insufficient capacity to process the Expedited request. This error applies only to Expedited retrievals and not to S3 Standard or Bulk retrievals.)

      HTTP Status Code: 503

      SOAP Fault Code Prefix: N/A

The following operations are related to RestoreObject:

\n ", + "smithy.api#documentation": "\n

This operation is not supported by directory buckets.

\n
\n

Restores an archived copy of an object back into Amazon S3

\n

This functionality is not supported for Amazon S3 on Outposts.

\n

This action performs the following types of requests:

  • select - Perform a select query on an archived object

  • restore an archive - Restore an archived object

For more information about the S3 structure in the request body, see the\n following:

\n \n

Define the SQL expression for the SELECT type of restoration for your query\n in the request body's SelectParameters structure. You can use expressions like\n the following examples.

  • The following expression returns all records from the specified object.

    SELECT * FROM Object

  • Assuming that you are not using any headers for data stored in the object, you can specify columns with positional headers.

    SELECT s._1, s._2 FROM Object s WHERE s._3 > 100

  • If you have headers and you set the fileHeaderInfo in the CSV structure in the request body to USE, you can specify headers in the query. (If you set the fileHeaderInfo field to IGNORE, the first row is skipped for the query.) You cannot mix ordinal positions with header column names.

    SELECT s.Id, s.FirstName, s.SSN FROM S3Object s
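
As a rough illustration of how the SelectParameters structure above might be expressed with the AWS SDK for Go v2 (the SDK this patch targets), the following is a sketch of a SELECT-type restore request. The bucket names, key, output prefix, and query are placeholders, not values from this model.

package restoreselect

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// selectRestore issues a SELECT-type restore whose SQL query and
// serialization formats are carried in the request body's SelectParameters.
func selectRestore(ctx context.Context, client *s3.Client) error {
	_, err := client.RestoreObject(ctx, &s3.RestoreObjectInput{
		Bucket: aws.String("amzn-s3-demo-bucket"),  // placeholder
		Key:    aws.String("archived/records.csv"), // placeholder
		RestoreRequest: &types.RestoreRequest{
			Type: types.RestoreRequestTypeSelect,
			Tier: types.TierStandard,
			SelectParameters: &types.SelectParameters{
				Expression:     aws.String("SELECT s.Id, s.FirstName, s.SSN FROM S3Object s"),
				ExpressionType: types.ExpressionTypeSql,
				InputSerialization: &types.InputSerialization{
					CSV: &types.CSVInput{FileHeaderInfo: types.FileHeaderInfoUse},
				},
				OutputSerialization: &types.OutputSerialization{
					CSV: &types.CSVOutput{},
				},
			},
			// SELECT results are written as new objects under this location.
			OutputLocation: &types.OutputLocation{
				S3: &types.S3Location{
					BucketName: aws.String("amzn-s3-demo-bucket"), // placeholder
					Prefix:     aws.String("select-results/"),     // placeholder
				},
			},
		},
	})
	return err
}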

When making a select request, you can also do the following:

  • To expedite your queries, specify the Expedited tier. For more information about tiers, see "Restoring Archives," later in this topic.

  • Specify details about the data serialization format of both the input object that is being queried and the serialization of the CSV-encoded query results.

The following are additional important facts about the select feature:

  • The output results are new Amazon S3 objects. Unlike archive retrievals, they are stored until explicitly deleted, either manually or through a lifecycle configuration.

  • You can issue more than one select request on the same Amazon S3 object. Amazon S3 doesn't duplicate requests, so avoid issuing duplicate requests.

  • Amazon S3 accepts a select request even if the object has already been restored. A select request doesn't return error response 409.

Permissions

To use this operation, you must have permissions to perform the\n s3:RestoreObject action. The bucket owner has this permission by\n default and can grant this permission to others. For more information about\n permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the\n Amazon S3 User Guide.

Restoring objects

Objects that you archive to the S3 Glacier Flexible Retrieval or S3 Glacier Deep Archive storage class, and the S3 Intelligent-Tiering Archive or S3 Intelligent-Tiering Deep Archive tiers, are not accessible in real time. For objects in the S3 Glacier Flexible Retrieval or S3 Glacier Deep Archive storage classes, you must first initiate a restore request, and then wait until a temporary copy of the object is available. If you want a permanent copy of the object, create a copy of it in the Amazon S3 Standard storage class in your S3 bucket. To access an archived object, you must restore the object for the duration (number of days) that you specify. For objects in the Archive Access or Deep Archive Access tiers of S3 Intelligent-Tiering, you must first initiate a restore request, and then wait until the object is moved into the Frequent Access tier.

\n

To restore a specific object version, you can provide a version ID. If you\n don't provide a version ID, Amazon S3 restores the current version.

\n

When restoring an archived object, you can specify one of the following data\n access tier options in the Tier element of the request body:

  • Expedited - Expedited retrievals allow you to quickly access your data stored in the S3 Glacier Flexible Retrieval storage class or S3 Intelligent-Tiering Archive tier when occasional urgent requests for restoring archives are required. For all but the largest archived objects (250 MB+), data accessed using Expedited retrievals is typically made available within 1–5 minutes. Provisioned capacity ensures that retrieval capacity for Expedited retrievals is available when you need it. Expedited retrievals and provisioned capacity are not available for objects stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier.

  • Standard - Standard retrievals allow you to access any of your archived objects within several hours. This is the default option for retrieval requests that do not specify the retrieval option. Standard retrievals typically finish within 3–5 hours for objects stored in the S3 Glacier Flexible Retrieval storage class or S3 Intelligent-Tiering Archive tier. They typically finish within 12 hours for objects stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier. Standard retrievals are free for objects stored in S3 Intelligent-Tiering.

  • Bulk - Bulk retrievals are free for objects stored in the S3 Glacier Flexible Retrieval and S3 Intelligent-Tiering storage classes, enabling you to retrieve large amounts, even petabytes, of data at no cost. Bulk retrievals typically finish within 5–12 hours for objects stored in the S3 Glacier Flexible Retrieval storage class or S3 Intelligent-Tiering Archive tier. Bulk retrievals are also the lowest-cost retrieval option when restoring objects from S3 Glacier Deep Archive. They typically finish within 48 hours for objects stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier.

For more information about archive retrieval options and provisioned capacity\n for Expedited data access, see Restoring Archived\n Objects in the Amazon S3 User Guide.

\n
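
For orientation, here is a minimal sketch of an archive-retrieval restore using the Tier and Days elements described above, written against the AWS SDK for Go v2; the bucket and key names are placeholders, and two days with the Standard tier is just an example choice.

package restoreexample

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// restoreArchived asks S3 to keep a temporary copy of an archived object
// available for two days, retrieved with the Standard tier.
func restoreArchived(ctx context.Context, client *s3.Client) error {
	_, err := client.RestoreObject(ctx, &s3.RestoreObjectInput{
		Bucket: aws.String("amzn-s3-demo-bucket"),    // placeholder
		Key:    aws.String("archive/report.parquet"), // placeholder
		RestoreRequest: &types.RestoreRequest{
			Days: aws.Int32(2),
			GlacierJobParameters: &types.GlacierJobParameters{
				Tier: types.TierStandard,
			},
		},
	})
	return err
}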

You can use Amazon S3 restore speed upgrade to change the restore speed to a faster\n speed while it is in progress. For more information, see Upgrading the speed of an in-progress restore in the\n Amazon S3 User Guide.

\n

To get the status of object restoration, you can send a HEAD\n request. Operations return the x-amz-restore header, which provides\n information about the restoration status, in the response. You can use Amazon S3 event\n notifications to notify you when a restore is initiated or completed. For more\n information, see Configuring Amazon S3 Event\n Notifications in the Amazon S3 User Guide.

\n
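
A short sketch of checking restoration status by sending a HEAD request through the AWS SDK for Go v2 and reading the x-amz-restore value surfaced on the output; the bucket and key are placeholders.

package restoreexample

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// restoreStatus returns the raw x-amz-restore header value, for example
// ongoing-request="true" while the restore is still in progress.
func restoreStatus(ctx context.Context, client *s3.Client) (string, error) {
	out, err := client.HeadObject(ctx, &s3.HeadObjectInput{
		Bucket: aws.String("amzn-s3-demo-bucket"),    // placeholder
		Key:    aws.String("archive/report.parquet"), // placeholder
	})
	if err != nil {
		return "", err
	}
	return aws.ToString(out.Restore), nil
}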

After restoring an archived object, you can update the restoration period by reissuing the request with a new period. Amazon S3 updates the restoration period relative to the current time and charges only for the request; there are no data transfer charges. You cannot update the restoration period when Amazon S3 is actively processing your current restore request for the object.

\n

If your bucket has a lifecycle configuration with a rule that includes an\n expiration action, the object expiration overrides the life span that you specify\n in a restore request. For example, if you restore an object copy for 10 days, but\n the object is scheduled to expire in 3 days, Amazon S3 deletes the object in 3 days.\n For more information about lifecycle configuration, see PutBucketLifecycleConfiguration and Object Lifecycle\n Management in Amazon S3 User Guide.

Responses

A successful action returns either the 200 OK or 202\n Accepted status code.

  • If the object is not previously restored, then Amazon S3 returns 202 Accepted in the response.

  • If the object is previously restored, Amazon S3 returns 200 OK in the response.

  • Special errors:

    • Code: RestoreAlreadyInProgress

      Cause: Object restore is already in progress. (This error does not apply to SELECT type requests.)

      HTTP Status Code: 409 Conflict

      SOAP Fault Code Prefix: Client

    • Code: GlacierExpeditedRetrievalNotAvailable

      Cause: Expedited retrievals are currently not available. Try again later. (Returned if there is insufficient capacity to process the Expedited request. This error applies only to Expedited retrievals and not to S3 Standard or Bulk retrievals.)

      HTTP Status Code: 503

      SOAP Fault Code Prefix: N/A

The following operations are related to RestoreObject:

\n ", "smithy.api#examples": [ { "title": "To restore an archived object", @@ -29774,7 +32064,7 @@ "Bucket": { "target": "com.amazonaws.s3#BucketName", "traits": { - "smithy.api#documentation": "

The bucket name containing the object to restore.

\n

When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.

\n

When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form \n AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide.

", + "smithy.api#documentation": "

The bucket name containing the object to restore.

\n

\n Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.

\n

\n S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form \n AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide.

", "smithy.api#httpLabel": {}, "smithy.api#required": {}, "smithy.rules#contextParam": { @@ -29813,14 +32103,14 @@ "ChecksumAlgorithm": { "target": "com.amazonaws.s3#ChecksumAlgorithm", "traits": { - "smithy.api#documentation": "

Indicates the algorithm used to create the checksum for the object when using the SDK. This header will not provide any\n additional functionality if not using the SDK. When sending this header, there must be a corresponding x-amz-checksum or\n x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For more\n information, see Checking object integrity in\n the Amazon S3 User Guide.

\n

If you provide an individual checksum, Amazon S3 ignores any provided\n ChecksumAlgorithm parameter.

", + "smithy.api#documentation": "

Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any\n additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum or\n x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For more\n information, see Checking object integrity in\n the Amazon S3 User Guide.

\n

If you provide an individual checksum, Amazon S3 ignores any provided\n ChecksumAlgorithm parameter.

", "smithy.api#httpHeader": "x-amz-sdk-checksum-algorithm" } }, "ExpectedBucketOwner": { "target": "com.amazonaws.s3#AccountId", "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

", + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } } @@ -29910,7 +32200,7 @@ } }, "traits": { - "smithy.api#documentation": "

Specifies the restoration status of an object. Objects in certain storage classes must\n be restored before they can be retrieved. For more information about these storage classes\n and how to work with archived objects, see Working with archived\n objects in the Amazon S3 User Guide.

" + "smithy.api#documentation": "

Specifies the restoration status of an object. Objects in certain storage classes must\n be restored before they can be retrieved. For more information about these storage classes\n and how to work with archived objects, see Working with archived\n objects in the Amazon S3 User Guide.

\n \n

This functionality is not supported for directory buckets. Only the S3 Express One Zone storage class is supported by directory buckets to store objects.

\n
" } }, "com.amazonaws.s3#Role": { @@ -30093,7 +32383,7 @@ "target": "com.amazonaws.s3#SelectObjectContentOutput" }, "traits": { - "smithy.api#documentation": "

This action filters the contents of an Amazon S3 object based on a simple structured query\n language (SQL) statement. In the request, along with the SQL expression, you must also\n specify a data serialization format (JSON, CSV, or Apache Parquet) of the object. Amazon S3 uses\n this format to parse object data into records, and returns only records that match the\n specified SQL expression. You must also specify the data serialization format for the\n response.

\n

This action is not supported by Amazon S3 on Outposts.

\n

For more information about Amazon S3 Select, see Selecting Content from\n Objects and SELECT\n Command in the Amazon S3 User Guide.

\n

\n
\n
Permissions
\n
\n

You must have s3:GetObject permission for this operation. Amazon S3\n Select does not support anonymous access. For more information about permissions,\n see Specifying Permissions in\n a Policy in the Amazon S3 User Guide.

\n
\n
Object Data Formats
\n
\n

You can use Amazon S3 Select to query objects that have the following format\n properties:

\n
    \n
  • \n

    \n CSV, JSON, and Parquet - Objects must be in CSV,\n JSON, or Parquet format.

    \n
  • \n
  • \n

    \n UTF-8 - UTF-8 is the only encoding type Amazon S3 Select\n supports.

    \n
  • \n
  • \n

    \n GZIP or BZIP2 - CSV and JSON files can be compressed\n using GZIP or BZIP2. GZIP and BZIP2 are the only compression formats that\n Amazon S3 Select supports for CSV and JSON files. Amazon S3 Select supports columnar\n compression for Parquet using GZIP or Snappy. Amazon S3 Select does not support\n whole-object compression for Parquet objects.

    \n
  • \n
  • \n

    \n Server-side encryption - Amazon S3 Select supports\n querying objects that are protected with server-side encryption.

    \n

    For objects that are encrypted with customer-provided encryption keys\n (SSE-C), you must use HTTPS, and you must use the headers that are\n documented in the GetObject. For more\n information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys)\n in the Amazon S3 User Guide.

    \n

    For objects that are encrypted with Amazon S3 managed keys (SSE-S3) and\n Amazon Web Services KMS keys (SSE-KMS), server-side encryption is handled transparently,\n so you don't need to specify anything. For more information about\n server-side encryption, including SSE-S3 and SSE-KMS, see Protecting Data Using Server-Side Encryption in the\n Amazon S3 User Guide.

    \n
  • \n
\n
\n
Working with the Response Body
\n
\n

Given the response size is unknown, Amazon S3 Select streams the response as a\n series of messages and includes a Transfer-Encoding header with\n chunked as its value in the response. For more information, see\n Appendix:\n SelectObjectContent\n Response.

\n
\n
GetObject Support
\n
\n

The SelectObjectContent action does not support the following\n GetObject functionality. For more information, see GetObject.

\n
    \n
  • \n

    \n Range: Although you can specify a scan range for an Amazon S3 Select\n request (see SelectObjectContentRequest - ScanRange in the request\n parameters), you cannot specify the range of bytes of an object to return.\n

    \n
  • \n
  • \n

    The GLACIER, DEEP_ARCHIVE, and\n REDUCED_REDUNDANCY storage classes, or the\n ARCHIVE_ACCESS and DEEP_ARCHIVE_ACCESS access\n tiers of the INTELLIGENT_TIERING storage class: You cannot\n query objects in the GLACIER, DEEP_ARCHIVE, or\n REDUCED_REDUNDANCY storage classes, nor objects in the\n ARCHIVE_ACCESS or DEEP_ARCHIVE_ACCESS access\n tiers of the INTELLIGENT_TIERING storage class. For more\n information about storage classes, see Using Amazon S3\n storage classes in the\n Amazon S3 User Guide.

    \n
  • \n
\n
\n
Special Errors
\n
\n

For a list of special errors for this operation, see List of SELECT Object Content Error Codes\n

\n
\n
\n

The following operations are related to SelectObjectContent:

\n ", + "smithy.api#documentation": "\n

This operation is not supported by directory buckets.

\n
\n

This action filters the contents of an Amazon S3 object based on a simple structured query\n language (SQL) statement. In the request, along with the SQL expression, you must also\n specify a data serialization format (JSON, CSV, or Apache Parquet) of the object. Amazon S3 uses\n this format to parse object data into records, and returns only records that match the\n specified SQL expression. You must also specify the data serialization format for the\n response.

\n

This functionality is not supported for Amazon S3 on Outposts.

\n

For more information about Amazon S3 Select, see Selecting Content from\n Objects and SELECT\n Command in the Amazon S3 User Guide.

\n

\n
\n
Permissions
\n
\n

You must have the s3:GetObject permission for this operation. Amazon S3\n Select does not support anonymous access. For more information about permissions,\n see Specifying Permissions in\n a Policy in the Amazon S3 User Guide.

\n
\n
Object Data Formats
\n
\n

You can use Amazon S3 Select to query objects that have the following format\n properties:

\n
    \n
  • \n

    \n CSV, JSON, and Parquet - Objects must be in CSV,\n JSON, or Parquet format.

    \n
  • \n
  • \n

    \n UTF-8 - UTF-8 is the only encoding type Amazon S3 Select\n supports.

    \n
  • \n
  • \n

    \n GZIP or BZIP2 - CSV and JSON files can be compressed\n using GZIP or BZIP2. GZIP and BZIP2 are the only compression formats that\n Amazon S3 Select supports for CSV and JSON files. Amazon S3 Select supports columnar\n compression for Parquet using GZIP or Snappy. Amazon S3 Select does not support\n whole-object compression for Parquet objects.

    \n
  • \n
  • \n

    \n Server-side encryption - Amazon S3 Select supports\n querying objects that are protected with server-side encryption.

    \n

    For objects that are encrypted with customer-provided encryption keys\n (SSE-C), you must use HTTPS, and you must use the headers that are\n documented in the GetObject. For more\n information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys)\n in the Amazon S3 User Guide.

    \n

    For objects that are encrypted with Amazon S3 managed keys (SSE-S3) and\n Amazon Web Services KMS keys (SSE-KMS), server-side encryption is handled transparently,\n so you don't need to specify anything. For more information about\n server-side encryption, including SSE-S3 and SSE-KMS, see Protecting Data Using Server-Side Encryption in the\n Amazon S3 User Guide.

    \n
  • \n
\n
\n
Working with the Response Body
\n
\n

Given the response size is unknown, Amazon S3 Select streams the response as a\n series of messages and includes a Transfer-Encoding header with\n chunked as its value in the response. For more information, see\n Appendix:\n SelectObjectContent\n Response.

\n
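
A rough sketch of consuming that chunked event stream with the AWS SDK for Go v2 follows; the bucket, key, and query are placeholders, and the exact event-stream accessor names shown here are an assumption that may vary slightly between SDK releases.

package selectexample

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func selectRecords(ctx context.Context, client *s3.Client) error {
	resp, err := client.SelectObjectContent(ctx, &s3.SelectObjectContentInput{
		Bucket:         aws.String("amzn-s3-demo-bucket"), // placeholder
		Key:            aws.String("data/people.csv"),     // placeholder
		Expression:     aws.String("SELECT * FROM S3Object s"),
		ExpressionType: types.ExpressionTypeSql,
		InputSerialization: &types.InputSerialization{
			CSV: &types.CSVInput{FileHeaderInfo: types.FileHeaderInfoUse},
		},
		OutputSerialization: &types.OutputSerialization{CSV: &types.CSVOutput{}},
	})
	if err != nil {
		return err
	}
	stream := resp.GetStream()
	defer stream.Close()

	// The response arrives as a series of event-stream messages; Records
	// events carry chunks of the query results.
	for event := range stream.Events() {
		if records, ok := event.(*types.SelectObjectContentEventStreamMemberRecords); ok {
			fmt.Print(string(records.Value.Payload))
		}
	}
	return stream.Err()
}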
\n
GetObject Support
\n
\n

The SelectObjectContent action does not support the following\n GetObject functionality. For more information, see GetObject.

\n
    \n
  • \n

    \n Range: Although you can specify a scan range for an Amazon S3 Select\n request (see SelectObjectContentRequest - ScanRange in the request\n parameters), you cannot specify the range of bytes of an object to return.\n

    \n
  • \n
  • \n

    The GLACIER, DEEP_ARCHIVE, and\n REDUCED_REDUNDANCY storage classes, or the\n ARCHIVE_ACCESS and DEEP_ARCHIVE_ACCESS access\n tiers of the INTELLIGENT_TIERING storage class: You cannot\n query objects in the GLACIER, DEEP_ARCHIVE, or\n REDUCED_REDUNDANCY storage classes, nor objects in the\n ARCHIVE_ACCESS or DEEP_ARCHIVE_ACCESS access\n tiers of the INTELLIGENT_TIERING storage class. For more\n information about storage classes, see Using Amazon S3\n storage classes in the\n Amazon S3 User Guide.

    \n
  • \n
\n
\n
Special Errors
\n
\n

For a list of special errors for this operation, see List of SELECT Object Content Error Codes\n

\n
\n
\n

The following operations are related to SelectObjectContent:

\n ", "smithy.api#http": { "method": "POST", "uri": "/{Bucket}/{Key+}?select&select-type=2&x-id=SelectObjectContent", @@ -30241,7 +32531,7 @@ "ExpectedBucketOwner": { "target": "com.amazonaws.s3#AccountId", "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

", + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } } @@ -30374,6 +32664,72 @@ "target": "com.amazonaws.s3#ServerSideEncryptionRule" } }, + "com.amazonaws.s3#SessionCredentialValue": { + "type": "string", + "traits": { + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.s3#SessionCredentials": { + "type": "structure", + "members": { + "AccessKeyId": { + "target": "com.amazonaws.s3#AccessKeyIdValue", + "traits": { + "smithy.api#documentation": "

A unique identifier that's associated with a secret access key. The access key ID and the secret access key are used together to sign programmatic Amazon Web Services requests cryptographically.

", + "smithy.api#required": {}, + "smithy.api#xmlName": "AccessKeyId" + } + }, + "SecretAccessKey": { + "target": "com.amazonaws.s3#SessionCredentialValue", + "traits": { + "smithy.api#documentation": "

A key that's used with the access key ID to cryptographically sign programmatic Amazon Web Services requests. Signing a request identifies the sender and prevents the request from being altered.

", + "smithy.api#required": {}, + "smithy.api#xmlName": "SecretAccessKey" + } + }, + "SessionToken": { + "target": "com.amazonaws.s3#SessionCredentialValue", + "traits": { + "smithy.api#documentation": "

A part of the temporary security credentials. The session token is used to validate the temporary security credentials. \n \n

", + "smithy.api#required": {}, + "smithy.api#xmlName": "SessionToken" + } + }, + "Expiration": { + "target": "com.amazonaws.s3#SessionExpiration", + "traits": { + "smithy.api#documentation": "

Temporary security credentials expire after a specified interval. After temporary credentials expire, any calls that you make with those credentials will fail. So you must generate a new set of temporary credentials. \n Temporary credentials cannot be extended or refreshed beyond the original specified interval.

", + "smithy.api#required": {}, + "smithy.api#xmlName": "Expiration" + } + } + }, + "traits": { + "smithy.api#documentation": "

The established temporary security credentials of the session.

\n \n

\n Directory buckets - These session credentials are only supported for the authentication and authorization of Zonal endpoint APIs on directory buckets.

\n
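
As a speculative sketch only, the following shows how the CreateSession operation introduced by this change might be called from the AWS SDK for Go v2 to obtain these credentials; the directory bucket name is a placeholder, and in practice the SDK creates and refreshes the session for you, so calling CreateSession directly is rarely necessary.

package sessionexample

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// directoryBucketSession fetches temporary credentials scoped to one
// directory bucket, in read-write mode.
func directoryBucketSession(ctx context.Context, client *s3.Client) (*types.SessionCredentials, error) {
	out, err := client.CreateSession(ctx, &s3.CreateSessionInput{
		Bucket:      aws.String("amzn-s3-demo-bucket--usw2-az2--x-s3"), // placeholder
		SessionMode: types.SessionModeReadWrite,
	})
	if err != nil {
		return nil, err
	}
	// Credentials carry AccessKeyId, SecretAccessKey, SessionToken, and
	// Expiration, as documented above.
	return out.Credentials, nil
}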
" + } + }, + "com.amazonaws.s3#SessionExpiration": { + "type": "timestamp" + }, + "com.amazonaws.s3#SessionMode": { + "type": "enum", + "members": { + "ReadOnly": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ReadOnly" + } + }, + "ReadWrite": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ReadWrite" + } + } + } + }, "com.amazonaws.s3#Setting": { "type": "boolean" }, @@ -30552,6 +32908,12 @@ "traits": { "smithy.api#enumValue": "SNOW" } + }, + "EXPRESS_ONEZONE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "EXPRESS_ONEZONE" + } } } }, @@ -30949,7 +33311,7 @@ "aws.protocols#httpChecksum": { "requestAlgorithmMember": "ChecksumAlgorithm" }, - "smithy.api#documentation": "

Uploads a part in a multipart upload.

\n \n

In this operation, you provide part data in your request. However, you have an option\n to specify your existing Amazon S3 object as a data source for the part you are uploading. To\n upload a part from an existing object, you use the UploadPartCopy operation.\n

\n
\n

You must initiate a multipart upload (see CreateMultipartUpload)\n before you can upload any part. In response to your initiate request, Amazon S3 returns an\n upload ID, a unique identifier, that you must include in your upload part request.

\n

Part numbers can be any number from 1 to 10,000, inclusive. A part number uniquely\n identifies a part and also defines its position within the object being created. If you\n upload a new part using the same part number that was used with a previous part, the\n previously uploaded part is overwritten.

\n

For information about maximum and minimum part sizes and other multipart upload\n specifications, see Multipart upload limits in the Amazon S3 User Guide.

\n

To ensure that data is not corrupted when traversing the network, specify the\n Content-MD5 header in the upload part request. Amazon S3 checks the part data\n against the provided MD5 value. If they do not match, Amazon S3 returns an error.

\n

If the upload request is signed with Signature Version 4, then Amazon Web Services S3 uses the\n x-amz-content-sha256 header as a checksum instead of\n Content-MD5. For more information see Authenticating\n Requests: Using the Authorization Header (Amazon Web Services Signature Version 4).

\n

\n Note: After you initiate multipart upload and upload\n one or more parts, you must either complete or abort multipart upload in order to stop\n getting charged for storage of the uploaded parts. Only after you either complete or abort\n multipart upload, Amazon S3 frees up the parts storage and stops charging you for the parts\n storage.

\n

For more information on multipart uploads, go to Multipart Upload Overview in the\n Amazon S3 User Guide .

\n

For information on the permissions required to use the multipart upload API, go to\n Multipart\n Upload and Permissions in the Amazon S3 User Guide.

\n

Server-side encryption is for data encryption at rest. Amazon S3 encrypts your data as it\n writes it to disks in its data centers and decrypts it when you access it. You have three\n mutually exclusive options to protect data using server-side encryption in Amazon S3, depending\n on how you choose to manage the encryption keys. Specifically, the encryption key options\n are Amazon S3 managed keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS), and Customer-Provided Keys\n (SSE-C). Amazon S3 encrypts data with server-side encryption using Amazon S3 managed keys (SSE-S3) by\n default. You can optionally tell Amazon S3 to encrypt data at rest using server-side encryption\n with other key options. The option you use depends on whether you want to use KMS keys\n (SSE-KMS) or provide your own encryption key (SSE-C). If you choose to provide your own\n encryption key, the request headers you provide in the request must match the headers you\n used in the request to initiate the upload by using CreateMultipartUpload.\n For more information, go to Using Server-Side\n Encryption in the Amazon S3 User Guide.

\n

Server-side encryption is supported by the S3 Multipart Upload actions. Unless you are\n using a customer-provided encryption key (SSE-C), you don't need to specify the encryption\n parameters in each UploadPart request. Instead, you only need to specify the server-side\n encryption parameters in the initial Initiate Multipart request. For more information, see\n CreateMultipartUpload.

\n

If you requested server-side encryption using a customer-provided encryption key (SSE-C)\n in your initiate multipart upload request, you must provide identical encryption\n information in each part upload using the following headers.

\n
    \n
  • \n

    x-amz-server-side-encryption-customer-algorithm

    \n
  • \n
  • \n

    x-amz-server-side-encryption-customer-key

    \n
  • \n
  • \n

    x-amz-server-side-encryption-customer-key-MD5

    \n
  • \n
\n

\n UploadPart has the following special errors:

\n
    \n
  • \n
      \n
    • \n

      \n Code: NoSuchUpload\n

      \n
    • \n
    • \n

      \n Cause: The specified multipart upload does not exist. The upload\n ID might be invalid, or the multipart upload might have been aborted or\n completed.\n

      \n
    • \n
    • \n

      \n HTTP Status Code: 404 Not Found \n

      \n
    • \n
    • \n

      \n SOAP Fault Code Prefix: Client\n

      \n
    • \n
    \n
  • \n
\n

The following operations are related to UploadPart:

\n ", + "smithy.api#documentation": "

Uploads a part in a multipart upload.

\n \n

In this operation, you provide new data as a part of an object in your request. However, you have an option\n to specify your existing Amazon S3 object as a data source for the part you are uploading. To\n upload a part from an existing object, you use the UploadPartCopy operation.\n

\n
\n

You must initiate a multipart upload (see CreateMultipartUpload)\n before you can upload any part. In response to your initiate request, Amazon S3 returns an\n upload ID, a unique identifier that you must include in your upload part request.

\n

Part numbers can be any number from 1 to 10,000, inclusive. A part number uniquely\n identifies a part and also defines its position within the object being created. If you\n upload a new part using the same part number that was used with a previous part, the\n previously uploaded part is overwritten.

\n

For information about maximum and minimum part sizes and other multipart upload\n specifications, see Multipart upload limits in the Amazon S3 User Guide.

\n \n

After you initiate multipart upload and upload\n one or more parts, you must either complete or abort multipart upload in order to stop\n getting charged for storage of the uploaded parts. Only after you either complete or abort\n multipart upload, Amazon S3 frees up the parts storage and stops charging you for the parts\n storage.

\n
\n
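
To make the initiate / upload part / complete flow concrete, here is a condensed sketch using the AWS SDK for Go v2; the bucket, key, and in-memory part slices are placeholders, and depending on the SDK release PartNumber may be a plain int32 rather than a pointer.

package uploadexample

import (
	"bytes"
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func multipartUpload(ctx context.Context, client *s3.Client, data [][]byte) error {
	bucket, key := aws.String("amzn-s3-demo-bucket"), aws.String("big/object.bin") // placeholders

	create, err := client.CreateMultipartUpload(ctx, &s3.CreateMultipartUploadInput{Bucket: bucket, Key: key})
	if err != nil {
		return err
	}

	var completed []types.CompletedPart
	for i, part := range data {
		// Part numbers run from 1 to 10,000 and fix each part's position.
		num := int32(i + 1)
		up, err := client.UploadPart(ctx, &s3.UploadPartInput{
			Bucket:     bucket,
			Key:        key,
			UploadId:   create.UploadId,
			PartNumber: aws.Int32(num),
			Body:       bytes.NewReader(part),
		})
		if err != nil {
			return err
		}
		completed = append(completed, types.CompletedPart{ETag: up.ETag, PartNumber: aws.Int32(num)})
	}

	// Completing the upload stops further storage charges for the parts.
	_, err = client.CompleteMultipartUpload(ctx, &s3.CompleteMultipartUploadInput{
		Bucket:          bucket,
		Key:             key,
		UploadId:        create.UploadId,
		MultipartUpload: &types.CompletedMultipartUpload{Parts: completed},
	})
	return err
}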

For more information on multipart uploads, go to Multipart Upload Overview in the\n Amazon S3 User Guide .

\n \n

\n Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name\n . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the\n Amazon S3 User Guide.

\n
\n
\n
Permissions
\n
\n
    \n
  • \n

    \n General purpose bucket permissions - For information on the permissions required to use the multipart upload API, see \n Multipart\n Upload and Permissions in the Amazon S3 User Guide.

    \n
  • \n
  • \n

    \n Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the \n CreateSession\n API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. \nAmazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see \n CreateSession\n .

    \n
  • \n
\n
\n
Data integrity
\n
\n

\n General purpose bucket - To ensure that data is not corrupted traversing the network, specify the\n Content-MD5 header in the upload part request. Amazon S3 checks the part data against the provided MD5 value. If they do not match, Amazon S3 returns an error. If the upload request is signed with Signature Version 4, then Amazon Web Services S3 uses the\n x-amz-content-sha256 header as a checksum instead of\n Content-MD5. For more information see Authenticating\n Requests: Using the Authorization Header (Amazon Web Services Signature Version 4).

\n \n

\n Directory buckets - MD5 is not supported by directory buckets. You can use checksum algorithms to check object integrity.

\n
\n
\n
Encryption
\n
\n
    \n
  • \n

    \n General purpose bucket - Server-side encryption is for data encryption at rest. Amazon S3 encrypts your data as it\n writes it to disks in its data centers and decrypts it when you access it. You have \n mutually exclusive options to protect data using server-side encryption in Amazon S3, depending\n on how you choose to manage the encryption keys. Specifically, the encryption key options\n are Amazon S3 managed keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS), and Customer-Provided Keys\n (SSE-C). Amazon S3 encrypts data with server-side encryption using Amazon S3 managed keys (SSE-S3) by\n default. You can optionally tell Amazon S3 to encrypt data at rest using server-side encryption\n with other key options. The option you use depends on whether you want to use KMS keys\n (SSE-KMS) or provide your own encryption key (SSE-C).

    \n

    Server-side encryption is supported by the S3 Multipart Upload operations. Unless you are\n using a customer-provided encryption key (SSE-C), you don't need to specify the encryption\n parameters in each UploadPart request. Instead, you only need to specify the server-side\n encryption parameters in the initial Initiate Multipart request. For more information, see\n CreateMultipartUpload.

    \n

    If you request server-side encryption using a customer-provided encryption key (SSE-C)\n in your initiate multipart upload request, you must provide identical encryption\n information in each part upload using the following request headers.

    \n
      \n
    • \n

      x-amz-server-side-encryption-customer-algorithm

      \n
    • \n
    • \n

      x-amz-server-side-encryption-customer-key

      \n
    • \n
    • \n

      x-amz-server-side-encryption-customer-key-MD5

      \n
    • \n
    \n
  • \n
  • \n

    \n Directory bucket - For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) is supported.

    \n
  • \n
\n

\n For more information, see Using Server-Side\n Encryption in the Amazon S3 User Guide.

\n
\n
Special errors
\n
\n
  • Error Code: NoSuchUpload

    • Description: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed.

    • HTTP Status Code: 404 Not Found

    • SOAP Fault Code Prefix: Client
\n
HTTP Host header syntax
\n
\n

\n Directory buckets - The HTTP Host header syntax is \n Bucket_name.s3express-az_id.region.amazonaws.com.

\n
\n
\n

The following operations are related to UploadPart:

\n ", "smithy.api#http": { "method": "PUT", "uri": "/{Bucket}/{Key+}?x-id=UploadPart", @@ -30966,11 +33328,16 @@ "target": "com.amazonaws.s3#UploadPartCopyOutput" }, "traits": { - "smithy.api#documentation": "

Uploads a part by copying data from an existing object as data source. You specify the\n data source by adding the request header x-amz-copy-source in your request and\n a byte range by adding the request header x-amz-copy-source-range in your\n request.

\n

For information about maximum and minimum part sizes and other multipart upload\n specifications, see Multipart upload limits in the Amazon S3 User Guide.

\n \n

Instead of using an existing object as part data, you might use the UploadPart\n action and provide data in your request.

\n
\n

You must initiate a multipart upload before you can upload any part. In response to your initiate request, Amazon S3 returns a unique identifier, the upload ID, that you must include in your upload part request.

\n

For more information about using the UploadPartCopy operation, see the\n following:

\n
    \n
  • \n

    For conceptual information about multipart uploads, see Uploading\n Objects Using Multipart Upload in the\n Amazon S3 User Guide.

    \n
  • \n
  • \n

    For information about permissions required to use the multipart upload API, see\n Multipart Upload and Permissions in the\n Amazon S3 User Guide.

    \n
  • \n
  • \n

    For information about copying objects using a single atomic action vs. a multipart\n upload, see Operations on Objects in\n the Amazon S3 User Guide.

    \n
  • \n
  • \n

    For information about using server-side encryption with customer-provided\n encryption keys with the UploadPartCopy operation, see CopyObject and UploadPart.

    \n
  • \n
\n

Note the following additional considerations about the request headers\n x-amz-copy-source-if-match, x-amz-copy-source-if-none-match,\n x-amz-copy-source-if-unmodified-since, and\n x-amz-copy-source-if-modified-since:

\n

\n
    \n
  • \n

    \n Consideration 1 - If both of the\n x-amz-copy-source-if-match and\n x-amz-copy-source-if-unmodified-since headers are present in the\n request as follows:

    \n

    \n x-amz-copy-source-if-match condition evaluates to true,\n and;

    \n

    \n x-amz-copy-source-if-unmodified-since condition evaluates to\n false;

    \n

    Amazon S3 returns 200 OK and copies the data.\n

    \n
  • \n
  • \n

    \n Consideration 2 - If both of the\n x-amz-copy-source-if-none-match and\n x-amz-copy-source-if-modified-since headers are present in the\n request as follows:

    \n

    \n x-amz-copy-source-if-none-match condition evaluates to\n false, and;

    \n

    \n x-amz-copy-source-if-modified-since condition evaluates to\n true;

    \n

    Amazon S3 returns 412 Precondition Failed response code.\n

    \n
  • \n
\n
\n
Versioning
\n
\n

If your bucket has versioning enabled, you could have multiple versions of the\n same object. By default, x-amz-copy-source identifies the current\n version of the object to copy. If the current version is a delete marker and you\n don't specify a versionId in the x-amz-copy-source, Amazon S3 returns a\n 404 error, because the object does not exist. If you specify versionId in the\n x-amz-copy-source and the versionId is a delete marker, Amazon S3\n returns an HTTP 400 error, because you are not allowed to specify a delete marker\n as a version for the x-amz-copy-source.

\n

You can optionally specify a specific version of the source object to copy by\n adding the versionId subresource as shown in the following\n example:

\n

\n x-amz-copy-source: /bucket/object?versionId=version id\n

\n
\n
Special errors
\n
\n
    \n
  • \n
      \n
    • \n

      \n Code: NoSuchUpload\n

      \n
    • \n
    • \n

      \n Cause: The specified multipart upload does not exist. The\n upload ID might be invalid, or the multipart upload might have been\n aborted or completed.\n

      \n
    • \n
    • \n

      \n HTTP Status Code: 404 Not Found\n

      \n
    • \n
    \n
  • \n
  • \n
      \n
    • \n

      \n Code: InvalidRequest\n

      \n
    • \n
    • \n

      \n Cause: The specified copy source is not supported as a\n byte-range copy source.\n

      \n
    • \n
    • \n

      \n HTTP Status Code: 400 Bad Request\n

      \n
    • \n
    \n
  • \n
\n
\n
\n

The following operations are related to UploadPartCopy:

\n ", + "smithy.api#documentation": "

Uploads a part by copying data from an existing object as data source. To specify the\n data source, you add the request header x-amz-copy-source in your request. To specify \n a byte range, you add the request header x-amz-copy-source-range in your\n request.

\n
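
A brief sketch of setting those two headers through the AWS SDK for Go v2 follows; the bucket names, key, upload ID, and byte range are placeholders, and PartNumber may be a plain int32 in older SDK releases.

package uploadexample

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// copyPart copies the first 5 MiB of an existing object into part 1 of an
// in-progress multipart upload.
func copyPart(ctx context.Context, client *s3.Client, uploadID string) error {
	_, err := client.UploadPartCopy(ctx, &s3.UploadPartCopyInput{
		Bucket:     aws.String("amzn-s3-demo-dest-bucket"), // destination (placeholder)
		Key:        aws.String("assembled/object.bin"),     // placeholder
		UploadId:   aws.String(uploadID),
		PartNumber: aws.Int32(1),
		// These map to x-amz-copy-source and x-amz-copy-source-range.
		CopySource:      aws.String("amzn-s3-demo-src-bucket/source/object.bin"),
		CopySourceRange: aws.String("bytes=0-5242879"),
	})
	return err
}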

For information about maximum and minimum part sizes and other multipart upload\n specifications, see Multipart upload limits in the Amazon S3 User Guide.

\n \n

Instead of copying data from an existing object as part data, you might use the UploadPart\n action to upload new data as a part of an object in your request.

\n
\n

You must initiate a multipart upload before you can upload any part. In response to your\n initiate request, Amazon S3 returns the upload ID, a unique identifier that you must include in\n your upload part request.

\n

For conceptual information about multipart uploads, see Uploading\n Objects Using Multipart Upload in the\n Amazon S3 User Guide. For information about copying objects using a single atomic action vs. a multipart\n upload, see Operations on Objects in\n the Amazon S3 User Guide.

\n \n

\n Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name\n . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the\n Amazon S3 User Guide.

\n
\n
\n
Authentication and authorization
\n
\n

All UploadPartCopy requests must be authenticated and signed by using IAM credentials (access key ID and secret access key for the IAM identities). All headers with the x-amz- prefix, including\n x-amz-copy-source, must be signed. For more information, see REST Authentication.

\n

\n Directory buckets - You must use IAM credentials to authenticate and authorize your access to the UploadPartCopy API operation, instead of using the \n temporary security credentials through the CreateSession API operation.

\n

Amazon Web Services CLI or SDKs handles authentication and authorization on your behalf.

\n
\n
Permissions
\n
\n

You must have READ access to the source object and WRITE\n access to the destination bucket.

\n
    \n
  • \n

    \n General purpose bucket permissions - You must have the permissions in a policy based on the bucket types of your source bucket and destination bucket in an UploadPartCopy operation.

    \n
      \n
    • \n

      If the source object is in a general purpose bucket, you must have the \n s3:GetObject\n permission to read the source object that is being copied.

      \n
    • \n
    • \n

If the destination bucket is a general purpose bucket, you must have the s3:PutObject permission to write the object copy to the destination bucket.

      \n
    • \n
    \n

    For information about permissions required to use the multipart upload API, see\n Multipart Upload and Permissions in the\n Amazon S3 User Guide.

    \n
  • \n
  • \n

    \n Directory bucket permissions -\n You must have permissions in a bucket policy or an IAM identity-based policy based on the source and destination\n bucket types in an UploadPartCopy operation.

    \n
      \n
    • \n

      If the source object that you want to copy is in a\n directory bucket, you must have the \n s3express:CreateSession\n permission in\n the Action element of a policy to read the object\n . \n By default, the session is in the ReadWrite mode. If you want to restrict the access, you can explicitly set the s3express:SessionMode condition key to ReadOnly on the copy source bucket.

      \n
    • \n
    • \n

      If the copy destination is a directory bucket, you must have the \n \n s3express:CreateSession\n permission in the\n Action element of a policy to write the object\n to the destination. The s3express:SessionMode condition\n key cannot be set to ReadOnly on the copy destination.

      \n
    • \n
    \n

    For example policies, see Example bucket policies for S3 Express One Zone and Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone in the\n Amazon S3 User Guide.

    \n
  • \n
\n
\n
Encryption
\n
\n
    \n
  • \n

    \n General purpose buckets - \n \n For information about using server-side encryption with customer-provided\n encryption keys with the UploadPartCopy operation, see CopyObject and UploadPart.\n

    \n
  • \n
  • \n

    \n Directory buckets - For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) is supported.

    \n
  • \n
\n
\n
Special errors
\n
\n
    \n
  • \n

    Error Code: NoSuchUpload\n

    \n
      \n
    • \n

      Description: The specified multipart upload does not exist. The\n upload ID might be invalid, or the multipart upload might have been\n aborted or completed.

      \n
    • \n
    • \n

      HTTP Status Code: 404 Not Found

      \n
    • \n
    \n
  • \n
  • \n

    Error Code: InvalidRequest\n

    \n
      \n
    • \n

      Description: The specified copy source is not supported as a\n byte-range copy source.

      \n
    • \n
    • \n

      HTTP Status Code: 400 Bad Request

      \n
    • \n
    \n
  • \n
\n
\n
HTTP Host header syntax
\n
\n

\n Directory buckets - The HTTP Host header syntax is \n Bucket_name.s3express-az_id.region.amazonaws.com.

\n
\n
\n

The following operations are related to UploadPartCopy:

\n ", "smithy.api#http": { "method": "PUT", "uri": "/{Bucket}/{Key+}?x-id=UploadPartCopy", "code": 200 + }, + "smithy.rules#staticContextParams": { + "DisableS3ExpressSessionAuth": { + "value": true + } } } }, @@ -30980,7 +33347,7 @@ "CopySourceVersionId": { "target": "com.amazonaws.s3#CopySourceVersionId", "traits": { - "smithy.api#documentation": "

The version of the source object that was copied, if you have enabled versioning on the\n source bucket.

", + "smithy.api#documentation": "

The version of the source object that was copied, if you have enabled versioning on the\n source bucket.

\n \n

This functionality is not supported when the source object is in a directory bucket.

\n
", "smithy.api#httpHeader": "x-amz-copy-source-version-id" } }, @@ -30994,35 +33361,35 @@ "ServerSideEncryption": { "target": "com.amazonaws.s3#ServerSideEncryption", "traits": { - "smithy.api#documentation": "

The server-side encryption algorithm used when storing this object in Amazon S3 (for example,\n AES256, aws:kms).

", + "smithy.api#documentation": "

The server-side encryption algorithm used when you store this object in Amazon S3 (for example,\n AES256, aws:kms).

\n \n

For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) is supported.

\n
", "smithy.api#httpHeader": "x-amz-server-side-encryption" } }, "SSECustomerAlgorithm": { "target": "com.amazonaws.s3#SSECustomerAlgorithm", "traits": { - "smithy.api#documentation": "

If server-side encryption with a customer-provided encryption key was requested, the\n response will include this header confirming the encryption algorithm used.

", + "smithy.api#documentation": "

If server-side encryption with a customer-provided encryption key was requested, the\n response will include this header to confirm the encryption algorithm that's used.

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-server-side-encryption-customer-algorithm" } }, "SSECustomerKeyMD5": { "target": "com.amazonaws.s3#SSECustomerKeyMD5", "traits": { - "smithy.api#documentation": "

If server-side encryption with a customer-provided encryption key was requested, the\n response will include this header to provide round-trip message integrity verification of\n the customer-provided encryption key.

", + "smithy.api#documentation": "

If server-side encryption with a customer-provided encryption key was requested, the\n response will include this header to provide the round-trip message integrity verification of\n the customer-provided encryption key.

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-server-side-encryption-customer-key-MD5" } }, "SSEKMSKeyId": { "target": "com.amazonaws.s3#SSEKMSKeyId", "traits": { - "smithy.api#documentation": "

If present, specifies the ID of the Key Management Service (KMS) symmetric encryption customer managed key\n that was used for the object.

", + "smithy.api#documentation": "

If present, indicates the ID of the Key Management Service (KMS) symmetric encryption customer managed key\n that was used for the object.

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-server-side-encryption-aws-kms-key-id" } }, "BucketKeyEnabled": { "target": "com.amazonaws.s3#BucketKeyEnabled", "traits": { - "smithy.api#documentation": "

Indicates whether the multipart upload uses an S3 Bucket Key for server-side encryption\n with Key Management Service (KMS) keys (SSE-KMS).

", + "smithy.api#documentation": "

Indicates whether the multipart upload uses an S3 Bucket Key for server-side encryption\n with Key Management Service (KMS) keys (SSE-KMS).

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-server-side-encryption-bucket-key-enabled" } }, @@ -31043,7 +33410,7 @@ "Bucket": { "target": "com.amazonaws.s3#BucketName", "traits": { - "smithy.api#documentation": "

The bucket name.

\n

When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.

\n

When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form \n AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide.

", + "smithy.api#documentation": "

The bucket name.

\n

\n Directory buckets - When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format \n Bucket_name.s3express-az_id.region.amazonaws.com. Path-style requests are not supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must follow the format \n bucket_base_name--az-id--x-s3 (for example, \n DOC-EXAMPLE-BUCKET--usw2-az2--x-s3). For information about bucket naming\n restrictions, see Directory bucket naming\n rules in the Amazon S3 User Guide.

\n

\n Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.

\n \n

Access points and Object Lambda access points are not supported by directory buckets.

\n
\n

\n S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form \n AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide.

", "smithy.api#httpLabel": {}, "smithy.api#required": {}, "smithy.rules#contextParam": { @@ -31054,7 +33421,7 @@ "CopySource": { "target": "com.amazonaws.s3#CopySource", "traits": { - "smithy.api#documentation": "

Specifies the source object for the copy operation. You specify the value in one of two\n formats, depending on whether you want to access the source object through an access point:

\n
    \n
  • \n

    For objects not accessed through an access point, specify the name of the source bucket\n and key of the source object, separated by a slash (/). For example, to copy the\n object reports/january.pdf from the bucket\n awsexamplebucket, use awsexamplebucket/reports/january.pdf.\n The value must be URL-encoded.

    \n
  • \n
  • \n

    For objects accessed through access points, specify the Amazon Resource Name (ARN) of the object as accessed through the access point, in the format arn:aws:s3:::accesspoint//object/. For example, to copy the object reports/january.pdf through access point my-access-point owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf. The value must be URL encoded.

    \n \n

    Amazon S3 supports copy operations using access points only when the source and destination buckets are in the same Amazon Web Services Region.

    \n
    \n

    Alternatively, for objects accessed through Amazon S3 on Outposts, specify the ARN of the object as accessed in the format arn:aws:s3-outposts:::outpost//object/. For example, to copy the object reports/january.pdf through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf. The value must be URL-encoded.

    \n
  • \n
\n

To copy a specific version of an object, append ?versionId=\n to the value (for example,\n awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893).\n If you don't specify a version ID, Amazon S3 copies the latest version of the source\n object.

", + "smithy.api#documentation": "

Specifies the source object for the copy operation. You specify the value in one of two\n formats, depending on whether you want to access the source object through an access point:

\n
    \n
  • \n

    For objects not accessed through an access point, specify the name of the source bucket\n and key of the source object, separated by a slash (/). For example, to copy the\n object reports/january.pdf from the bucket\n awsexamplebucket, use awsexamplebucket/reports/january.pdf.\n The value must be URL-encoded.

    \n
  • \n
  • \n

    For objects accessed through access points, specify the Amazon Resource Name (ARN) of the object as accessed through the access point, in the format arn:aws:s3:::accesspoint//object/. For example, to copy the object reports/january.pdf through access point my-access-point owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf. The value must be URL encoded.

    \n \n
      \n
    • \n

      Amazon S3 supports copy operations using Access points only when the source and destination buckets are in the same Amazon Web Services Region.

      \n
    • \n
    • \n

      Access points are not supported by directory buckets.

      \n
    • \n
    \n
    \n

    Alternatively, for objects accessed through Amazon S3 on Outposts, specify the ARN of the object as accessed in the format arn:aws:s3-outposts:::outpost//object/. For example, to copy the object reports/january.pdf through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf. The value must be URL-encoded.

    \n
  • \n
\n

If your bucket has versioning enabled, you could have multiple versions of the\n same object. By default, x-amz-copy-source identifies the current\n version of the source object to copy. \n To copy a specific version of the source object, append ?versionId=\n to the x-amz-copy-source request header (for example, \n x-amz-copy-source: /awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893).\n

\n

If the current version is a delete marker and you\n don't specify a versionId in the x-amz-copy-source request header, Amazon S3 returns a\n 404 Not Found error, because the object does not exist. If you specify versionId in the\n x-amz-copy-source and the versionId is a delete marker, Amazon S3\n returns an HTTP 400 Bad Request error, because you are not allowed to specify a delete marker\n as a version for the x-amz-copy-source.

\n \n

\n Directory buckets - S3 Versioning isn't enabled and supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-copy-source", "smithy.api#required": {} } @@ -31062,28 +33429,28 @@ "CopySourceIfMatch": { "target": "com.amazonaws.s3#CopySourceIfMatch", "traits": { - "smithy.api#documentation": "

Copies the object if its entity tag (ETag) matches the specified tag.

", + "smithy.api#documentation": "

Copies the object if its entity tag (ETag) matches the specified tag.

\n

If both of the\n x-amz-copy-source-if-match and\n x-amz-copy-source-if-unmodified-since headers are present in the\n request as follows:

\n

\n x-amz-copy-source-if-match condition evaluates to true,\n and;

\n

\n x-amz-copy-source-if-unmodified-since condition evaluates to\n false;

\n

Amazon S3 returns 200 OK and copies the data.\n

", "smithy.api#httpHeader": "x-amz-copy-source-if-match" } }, "CopySourceIfModifiedSince": { "target": "com.amazonaws.s3#CopySourceIfModifiedSince", "traits": { - "smithy.api#documentation": "

Copies the object if it has been modified since the specified time.

", + "smithy.api#documentation": "

Copies the object if it has been modified since the specified time.

\n

If both of the\n x-amz-copy-source-if-none-match and\n x-amz-copy-source-if-modified-since headers are present in the\n request as follows:

\n

\n x-amz-copy-source-if-none-match condition evaluates to\n false, and;

\n

\n x-amz-copy-source-if-modified-since condition evaluates to\n true;

\n

Amazon S3 returns 412 Precondition Failed response code.\n

", "smithy.api#httpHeader": "x-amz-copy-source-if-modified-since" } }, "CopySourceIfNoneMatch": { "target": "com.amazonaws.s3#CopySourceIfNoneMatch", "traits": { - "smithy.api#documentation": "

Copies the object if its entity tag (ETag) is different than the specified ETag.

", + "smithy.api#documentation": "

Copies the object if its entity tag (ETag) is different than the specified ETag.

\n

If both of the\n x-amz-copy-source-if-none-match and\n x-amz-copy-source-if-modified-since headers are present in the\n request as follows:

\n

\n x-amz-copy-source-if-none-match condition evaluates to\n false, and;

\n

\n x-amz-copy-source-if-modified-since condition evaluates to\n true;

\n

Amazon S3 returns 412 Precondition Failed response code.\n

", "smithy.api#httpHeader": "x-amz-copy-source-if-none-match" } }, "CopySourceIfUnmodifiedSince": { "target": "com.amazonaws.s3#CopySourceIfUnmodifiedSince", "traits": { - "smithy.api#documentation": "

Copies the object if it hasn't been modified since the specified time.

", + "smithy.api#documentation": "

Copies the object if it hasn't been modified since the specified time.

\n

If both of the\n x-amz-copy-source-if-match and\n x-amz-copy-source-if-unmodified-since headers are present in the\n request as follows:

\n

\n x-amz-copy-source-if-match condition evaluates to true,\n and;

\n

\n x-amz-copy-source-if-unmodified-since condition evaluates to\n false;

\n

Amazon S3 returns 200 OK and copies the data.\n

", "smithy.api#httpHeader": "x-amz-copy-source-if-unmodified-since" } }, @@ -31121,42 +33488,42 @@ "SSECustomerAlgorithm": { "target": "com.amazonaws.s3#SSECustomerAlgorithm", "traits": { - "smithy.api#documentation": "

Specifies the algorithm to use to when encrypting the object (for example,\n AES256).

", + "smithy.api#documentation": "

Specifies the algorithm to use when encrypting the object (for example,\n AES256).

\n \n

This functionality is not supported when the destination bucket is a directory bucket.

\n
", "smithy.api#httpHeader": "x-amz-server-side-encryption-customer-algorithm" } }, "SSECustomerKey": { "target": "com.amazonaws.s3#SSECustomerKey", "traits": { - "smithy.api#documentation": "

Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This\n value is used to store the object and then it is discarded; Amazon S3 does not store the\n encryption key. The key must be appropriate for use with the algorithm specified in the\n x-amz-server-side-encryption-customer-algorithm header. This must be the\n same encryption key specified in the initiate multipart upload request.

", + "smithy.api#documentation": "

Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This\n value is used to store the object and then it is discarded; Amazon S3 does not store the\n encryption key. The key must be appropriate for use with the algorithm specified in the\n x-amz-server-side-encryption-customer-algorithm header. This must be the\n same encryption key specified in the initiate multipart upload request.

\n \n

This functionality is not supported when the destination bucket is a directory bucket.

\n
", "smithy.api#httpHeader": "x-amz-server-side-encryption-customer-key" } }, "SSECustomerKeyMD5": { "target": "com.amazonaws.s3#SSECustomerKeyMD5", "traits": { - "smithy.api#documentation": "

Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses\n this header for a message integrity check to ensure that the encryption key was transmitted\n without error.

", + "smithy.api#documentation": "

Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses\n this header for a message integrity check to ensure that the encryption key was transmitted\n without error.

\n \n

This functionality is not supported when the destination bucket is a directory bucket.

\n
", "smithy.api#httpHeader": "x-amz-server-side-encryption-customer-key-MD5" } }, "CopySourceSSECustomerAlgorithm": { "target": "com.amazonaws.s3#CopySourceSSECustomerAlgorithm", "traits": { - "smithy.api#documentation": "

Specifies the algorithm to use when decrypting the source object (for example,\n AES256).

", + "smithy.api#documentation": "

Specifies the algorithm to use when decrypting the source object (for example,\n AES256).

\n \n

This functionality is not supported when the source object is in a directory bucket.

\n
", "smithy.api#httpHeader": "x-amz-copy-source-server-side-encryption-customer-algorithm" } }, "CopySourceSSECustomerKey": { "target": "com.amazonaws.s3#CopySourceSSECustomerKey", "traits": { - "smithy.api#documentation": "

Specifies the customer-provided encryption key for Amazon S3 to use to decrypt the source\n object. The encryption key provided in this header must be one that was used when the\n source object was created.

", + "smithy.api#documentation": "

Specifies the customer-provided encryption key for Amazon S3 to use to decrypt the source\n object. The encryption key provided in this header must be one that was used when the\n source object was created.

\n \n

This functionality is not supported when the source object is in a directory bucket.

\n
", "smithy.api#httpHeader": "x-amz-copy-source-server-side-encryption-customer-key" } }, "CopySourceSSECustomerKeyMD5": { "target": "com.amazonaws.s3#CopySourceSSECustomerKeyMD5", "traits": { - "smithy.api#documentation": "

Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses\n this header for a message integrity check to ensure that the encryption key was transmitted\n without error.

", + "smithy.api#documentation": "

Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses\n this header for a message integrity check to ensure that the encryption key was transmitted\n without error.

\n \n

This functionality is not supported when the source object is in a directory bucket.

\n
", "smithy.api#httpHeader": "x-amz-copy-source-server-side-encryption-customer-key-MD5" } }, @@ -31169,14 +33536,14 @@ "ExpectedBucketOwner": { "target": "com.amazonaws.s3#AccountId", "traits": { - "smithy.api#documentation": "

The account ID of the expected destination bucket owner. If the destination bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

", + "smithy.api#documentation": "

The account ID of the expected destination bucket owner. If the account ID that you provide does not match the actual owner of the destination bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } }, "ExpectedSourceBucketOwner": { "target": "com.amazonaws.s3#AccountId", "traits": { - "smithy.api#documentation": "

The account ID of the expected source bucket owner. If the source bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

", + "smithy.api#documentation": "

The account ID of the expected source bucket owner. If the account ID that you provide does not match the actual owner of the source bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

", "smithy.api#httpHeader": "x-amz-source-expected-bucket-owner" } } @@ -31191,7 +33558,7 @@ "ServerSideEncryption": { "target": "com.amazonaws.s3#ServerSideEncryption", "traits": { - "smithy.api#documentation": "

The server-side encryption algorithm used when storing this object in Amazon S3 (for example,\n AES256, aws:kms).

", + "smithy.api#documentation": "

The server-side encryption algorithm used when you store this object in Amazon S3 (for example,\n AES256, aws:kms).

\n \n

For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) is supported.

\n
", "smithy.api#httpHeader": "x-amz-server-side-encryption" } }, @@ -31205,56 +33572,56 @@ "ChecksumCRC32": { "target": "com.amazonaws.s3#ChecksumCRC32", "traits": { - "smithy.api#documentation": "

The base64-encoded, 32-bit CRC32 checksum of the object. This will only be present if it was uploaded\n with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

", + "smithy.api#documentation": "

The base64-encoded, 32-bit CRC32 checksum of the object. This will only be present if it was uploaded\n with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

", "smithy.api#httpHeader": "x-amz-checksum-crc32" } }, "ChecksumCRC32C": { "target": "com.amazonaws.s3#ChecksumCRC32C", "traits": { - "smithy.api#documentation": "

The base64-encoded, 32-bit CRC32C checksum of the object. This will only be present if it was uploaded\n with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

", + "smithy.api#documentation": "

The base64-encoded, 32-bit CRC32C checksum of the object. This will only be present if it was uploaded\n with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

", "smithy.api#httpHeader": "x-amz-checksum-crc32c" } }, "ChecksumSHA1": { "target": "com.amazonaws.s3#ChecksumSHA1", "traits": { - "smithy.api#documentation": "

The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded\n with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

", + "smithy.api#documentation": "

The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded\n with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

", "smithy.api#httpHeader": "x-amz-checksum-sha1" } }, "ChecksumSHA256": { "target": "com.amazonaws.s3#ChecksumSHA256", "traits": { - "smithy.api#documentation": "

The base64-encoded, 256-bit SHA-256 digest of the object. This will only be present if it was uploaded\n with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

", + "smithy.api#documentation": "

The base64-encoded, 256-bit SHA-256 digest of the object. This will only be present if it was uploaded\n with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

", "smithy.api#httpHeader": "x-amz-checksum-sha256" } }, "SSECustomerAlgorithm": { "target": "com.amazonaws.s3#SSECustomerAlgorithm", "traits": { - "smithy.api#documentation": "

If server-side encryption with a customer-provided encryption key was requested, the\n response will include this header confirming the encryption algorithm used.

", + "smithy.api#documentation": "

If server-side encryption with a customer-provided encryption key was requested, the\n response will include this header to confirm the encryption algorithm that's used.

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-server-side-encryption-customer-algorithm" } }, "SSECustomerKeyMD5": { "target": "com.amazonaws.s3#SSECustomerKeyMD5", "traits": { - "smithy.api#documentation": "

If server-side encryption with a customer-provided encryption key was requested, the\n response will include this header to provide round-trip message integrity verification of\n the customer-provided encryption key.

", + "smithy.api#documentation": "

If server-side encryption with a customer-provided encryption key was requested, the\n response will include this header to provide the round-trip message integrity verification of\n the customer-provided encryption key.

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-server-side-encryption-customer-key-MD5" } }, "SSEKMSKeyId": { "target": "com.amazonaws.s3#SSEKMSKeyId", "traits": { - "smithy.api#documentation": "

If present, specifies the ID of the Key Management Service (KMS) symmetric encryption customer managed key\n was used for the object.

", + "smithy.api#documentation": "

If present, indicates the ID of the Key Management Service (KMS) symmetric encryption customer managed key\n that was used for the object.

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-server-side-encryption-aws-kms-key-id" } }, "BucketKeyEnabled": { "target": "com.amazonaws.s3#BucketKeyEnabled", "traits": { - "smithy.api#documentation": "

Indicates whether the multipart upload uses an S3 Bucket Key for server-side encryption\n with Key Management Service (KMS) keys (SSE-KMS).

", + "smithy.api#documentation": "

Indicates whether the multipart upload uses an S3 Bucket Key for server-side encryption\n with Key Management Service (KMS) keys (SSE-KMS).

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-server-side-encryption-bucket-key-enabled" } }, @@ -31283,7 +33650,7 @@ "Bucket": { "target": "com.amazonaws.s3#BucketName", "traits": { - "smithy.api#documentation": "

The name of the bucket to which the multipart upload was initiated.

\n

When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.

\n

When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form \n AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide.

", + "smithy.api#documentation": "

The name of the bucket to which the multipart upload was initiated.

\n

\n Directory buckets - When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format \n Bucket_name.s3express-az_id.region.amazonaws.com. Path-style requests are not supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must follow the format \n bucket_base_name--az-id--x-s3 (for example, \n DOC-EXAMPLE-BUCKET--usw2-az2--x-s3). For information about bucket naming\n restrictions, see Directory bucket naming\n rules in the Amazon S3 User Guide.

\n

\n Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.

\n \n

Access points and Object Lambda access points are not supported by directory buckets.

\n
\n

\n S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form \n AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide.

", "smithy.api#httpLabel": {}, "smithy.api#required": {}, "smithy.rules#contextParam": { @@ -31301,14 +33668,14 @@ "ContentMD5": { "target": "com.amazonaws.s3#ContentMD5", "traits": { - "smithy.api#documentation": "

The base64-encoded 128-bit MD5 digest of the part data. This parameter is auto-populated\n when using the command from the CLI. This parameter is required if object lock parameters\n are specified.

", + "smithy.api#documentation": "

The base64-encoded 128-bit MD5 digest of the part data. This parameter is auto-populated\n when using the command from the CLI. This parameter is required if object lock parameters\n are specified.

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "Content-MD5" } }, "ChecksumAlgorithm": { "target": "com.amazonaws.s3#ChecksumAlgorithm", "traits": { - "smithy.api#documentation": "

Indicates the algorithm used to create the checksum for the object when using the SDK. This header will not provide any\n additional functionality if not using the SDK. When sending this header, there must be a corresponding x-amz-checksum or\n x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For more\n information, see Checking object integrity in\n the Amazon S3 User Guide.

\n

If you provide an individual checksum, Amazon S3 ignores any provided\n ChecksumAlgorithm parameter.

\n

This checksum algorithm must be the same for all parts and it match the checksum value\n supplied in the CreateMultipartUpload request.

", + "smithy.api#documentation": "

Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any\n additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum or\n x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For more\n information, see Checking object integrity in\n the Amazon S3 User Guide.

\n

If you provide an individual checksum, Amazon S3 ignores any provided\n ChecksumAlgorithm parameter.

\n

This checksum algorithm must be the same for all parts and it must match the checksum value\n supplied in the CreateMultipartUpload request.

", "smithy.api#httpHeader": "x-amz-sdk-checksum-algorithm" } }, @@ -31370,21 +33737,21 @@ "SSECustomerAlgorithm": { "target": "com.amazonaws.s3#SSECustomerAlgorithm", "traits": { - "smithy.api#documentation": "

Specifies the algorithm to use to when encrypting the object (for example,\n AES256).

", + "smithy.api#documentation": "

Specifies the algorithm to use when encrypting the object (for example,\n AES256).

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-server-side-encryption-customer-algorithm" } }, "SSECustomerKey": { "target": "com.amazonaws.s3#SSECustomerKey", "traits": { - "smithy.api#documentation": "

Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This\n value is used to store the object and then it is discarded; Amazon S3 does not store the\n encryption key. The key must be appropriate for use with the algorithm specified in the\n x-amz-server-side-encryption-customer-algorithm header. This must be the\n same encryption key specified in the initiate multipart upload request.

", + "smithy.api#documentation": "

Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This\n value is used to store the object and then it is discarded; Amazon S3 does not store the\n encryption key. The key must be appropriate for use with the algorithm specified in the\n x-amz-server-side-encryption-customer-algorithm header. This must be the\n same encryption key specified in the initiate multipart upload request.

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-server-side-encryption-customer-key" } }, "SSECustomerKeyMD5": { "target": "com.amazonaws.s3#SSECustomerKeyMD5", "traits": { - "smithy.api#documentation": "

Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses\n this header for a message integrity check to ensure that the encryption key was transmitted\n without error.

", + "smithy.api#documentation": "

Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses\n this header for a message integrity check to ensure that the encryption key was transmitted\n without error.

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-server-side-encryption-customer-key-MD5" } }, @@ -31397,7 +33764,7 @@ "ExpectedBucketOwner": { "target": "com.amazonaws.s3#AccountId", "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

", + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } } @@ -31493,7 +33860,7 @@ "smithy.api#auth": [ "aws.auth#sigv4" ], - "smithy.api#documentation": "

Passes transformed objects to a GetObject operation when using Object Lambda access points. For\n information about Object Lambda access points, see Transforming objects with\n Object Lambda access points in the Amazon S3 User Guide.

\n

This operation supports metadata that can be returned by GetObject, in addition to\n RequestRoute, RequestToken, StatusCode,\n ErrorCode, and ErrorMessage. The GetObject\n response metadata is supported so that the WriteGetObjectResponse caller,\n typically an Lambda function, can provide the same metadata when it internally invokes\n GetObject. When WriteGetObjectResponse is called by a\n customer-owned Lambda function, the metadata returned to the end user\n GetObject call might differ from what Amazon S3 would normally return.

\n

You can include any number of metadata headers. When including a metadata header, it\n should be prefaced with x-amz-meta. For example,\n x-amz-meta-my-custom-header: MyCustomValue. The primary use case for this\n is to forward GetObject metadata.

\n

Amazon Web Services provides some prebuilt Lambda functions that you can use with S3 Object Lambda to\n detect and redact personally identifiable information (PII) and decompress S3 objects.\n These Lambda functions are available in the Amazon Web Services Serverless Application Repository, and\n can be selected through the Amazon Web Services Management Console when you create your Object Lambda access point.

\n

Example 1: PII Access Control - This Lambda function uses Amazon Comprehend, a\n natural language processing (NLP) service using machine learning to find insights and\n relationships in text. It automatically detects personally identifiable information (PII)\n such as names, addresses, dates, credit card numbers, and social security numbers from\n documents in your Amazon S3 bucket.

\n

Example 2: PII Redaction - This Lambda function uses Amazon Comprehend, a natural\n language processing (NLP) service using machine learning to find insights and relationships\n in text. It automatically redacts personally identifiable information (PII) such as names,\n addresses, dates, credit card numbers, and social security numbers from documents in your\n Amazon S3 bucket.

\n

Example 3: Decompression - The Lambda function S3ObjectLambdaDecompression, is\n equipped to decompress objects stored in S3 in one of six compressed file formats including\n bzip2, gzip, snappy, zlib, zstandard and ZIP.

\n

For information on how to view and use these functions, see Using Amazon Web Services built Lambda\n functions in the Amazon S3 User Guide.

", + "smithy.api#documentation": "\n

This operation is not supported by directory buckets.

\n
\n

Passes transformed objects to a GetObject operation when using Object Lambda access points. For\n information about Object Lambda access points, see Transforming objects with\n Object Lambda access points in the Amazon S3 User Guide.

\n

This operation supports metadata that can be returned by GetObject, in addition to\n RequestRoute, RequestToken, StatusCode,\n ErrorCode, and ErrorMessage. The GetObject\n response metadata is supported so that the WriteGetObjectResponse caller,\n typically a Lambda function, can provide the same metadata when it internally invokes\n GetObject. When WriteGetObjectResponse is called by a\n customer-owned Lambda function, the metadata returned to the end user\n GetObject call might differ from what Amazon S3 would normally return.

\n

You can include any number of metadata headers. When including a metadata header, it\n should be prefaced with x-amz-meta. For example,\n x-amz-meta-my-custom-header: MyCustomValue. The primary use case for this\n is to forward GetObject metadata.

\n

Amazon Web Services provides some prebuilt Lambda functions that you can use with S3 Object Lambda to\n detect and redact personally identifiable information (PII) and decompress S3 objects.\n These Lambda functions are available in the Amazon Web Services Serverless Application Repository, and\n can be selected through the Amazon Web Services Management Console when you create your Object Lambda access point.

\n

Example 1: PII Access Control - This Lambda function uses Amazon Comprehend, a\n natural language processing (NLP) service using machine learning to find insights and\n relationships in text. It automatically detects personally identifiable information (PII)\n such as names, addresses, dates, credit card numbers, and social security numbers from\n documents in your Amazon S3 bucket.

\n

Example 2: PII Redaction - This Lambda function uses Amazon Comprehend, a natural\n language processing (NLP) service using machine learning to find insights and relationships\n in text. It automatically redacts personally identifiable information (PII) such as names,\n addresses, dates, credit card numbers, and social security numbers from documents in your\n Amazon S3 bucket.

\n

Example 3: Decompression - The Lambda function S3ObjectLambdaDecompression, is\n equipped to decompress objects stored in S3 in one of six compressed file formats including\n bzip2, gzip, snappy, zlib, zstandard and ZIP.

\n

For information on how to view and use these functions, see Using Amazon Web Services built Lambda\n functions in the Amazon S3 User Guide.

", "smithy.api#endpoint": { "hostPrefix": "{RequestRoute}." }, diff --git a/codegen/smithy-aws-go-codegen/src/main/java/software/amazon/smithy/aws/go/codegen/AwsGoDependency.java b/codegen/smithy-aws-go-codegen/src/main/java/software/amazon/smithy/aws/go/codegen/AwsGoDependency.java index 2c042eced55..d5c7b414a4e 100644 --- a/codegen/smithy-aws-go-codegen/src/main/java/software/amazon/smithy/aws/go/codegen/AwsGoDependency.java +++ b/codegen/smithy-aws-go-codegen/src/main/java/software/amazon/smithy/aws/go/codegen/AwsGoDependency.java @@ -44,6 +44,7 @@ public class AwsGoDependency { public static final GoDependency INTERNAL_ENDPOINTS = aws("internal/endpoints"); public static final GoDependency INTERNAL_AUTH = aws("internal/auth", "internalauth"); public static final GoDependency INTERNAL_AUTH_SMITHY = aws("internal/auth/smithy", "internalauthsmithy"); + public static final GoDependency INTERNAL_CONTEXT = aws("internal/context", "internalcontext"); public static final GoDependency INTERNAL_ENDPOINTS_V2 = awsModuleDep("internal/endpoints/v2", null, Versions.INTERNAL_ENDPOINTS_V2, "endpoints"); diff --git a/codegen/smithy-aws-go-codegen/src/main/java/software/amazon/smithy/aws/go/codegen/AwsHttpPresignURLClientGenerator.java b/codegen/smithy-aws-go-codegen/src/main/java/software/amazon/smithy/aws/go/codegen/AwsHttpPresignURLClientGenerator.java index 9d7d02e7fb5..1d9589a98bf 100644 --- a/codegen/smithy-aws-go-codegen/src/main/java/software/amazon/smithy/aws/go/codegen/AwsHttpPresignURLClientGenerator.java +++ b/codegen/smithy-aws-go-codegen/src/main/java/software/amazon/smithy/aws/go/codegen/AwsHttpPresignURLClientGenerator.java @@ -433,9 +433,8 @@ private void writeConvertToPresignMiddleware( if (isS3ServiceShape(model, serviceShape)) { writer.write(""); - writer.write("// add multi-region access point presigner"); + writer.write("// extended s3 presigning"); - // ==== multi-region access point support Symbol PresignConstructor = SymbolUtils.createValueSymbolBuilder( "NewPresignHTTPRequestMiddleware", AwsCustomGoDependency.S3_CUSTOMIZATION ).build(); @@ -451,6 +450,7 @@ private void writeConvertToPresignMiddleware( writer.openBlock("signermv := $T($T{", "})", PresignConstructor, PresignOptions, () -> { writer.write("CredentialsProvider : options.Credentials,"); + writer.write("ExpressCredentials : options.ExpressCredentials,"); writer.write("V4Presigner : c.Presigner,"); writer.write("V4aPresigner : c.presignerV4a,"); writer.write("LogSigning : options.ClientLogMode.IsSigning(),"); @@ -460,8 +460,6 @@ private void writeConvertToPresignMiddleware( writer.write("if err != nil { return err }"); writer.write(""); - // ======= - writer.openBlock("if c.Expires < 0 {", "}", () -> { writer.addUseImports(SmithyGoDependency.FMT); writer.write( @@ -753,7 +751,7 @@ private GoWriter.Writable generateBody() { schemeID := rscheme.Scheme.SchemeID() $setSignerVersion:W - if schemeID == "aws.auth#sigv4" { + if schemeID == "aws.auth#sigv4" || schemeID == "com.amazonaws.s3#sigv4express" { if sn, ok := smithyhttp.GetSigV4SigningName(&rscheme.SignerProperties); ok { ctx = $ctxSetName:T(ctx, sn) } diff --git a/codegen/smithy-aws-go-codegen/src/main/java/software/amazon/smithy/aws/go/codegen/ResolveClientConfigFromSources.java b/codegen/smithy-aws-go-codegen/src/main/java/software/amazon/smithy/aws/go/codegen/ResolveClientConfigFromSources.java index e217cb469f5..ac95defcdab 100644 --- a/codegen/smithy-aws-go-codegen/src/main/java/software/amazon/smithy/aws/go/codegen/ResolveClientConfigFromSources.java +++ 
b/codegen/smithy-aws-go-codegen/src/main/java/software/amazon/smithy/aws/go/codegen/ResolveClientConfigFromSources.java @@ -66,6 +66,14 @@ public class ResolveClientConfigFromSources implements GoIntegration { .awsResolveFunction(SymbolUtils.createValueSymbolBuilder(DISABLE_MRAP_CONFIG_RESOLVER) .build()) .build(), + AddAwsConfigFields.AwsConfigField.builder() + .name("DisableExpressAuth") + .type(getUniversalSymbol("*bool")) + .generatedOnClient(false) + .servicePredicate(ResolveClientConfigFromSources::isS3Service) + .awsResolveFunction(SymbolUtils.createValueSymbolBuilder("resolveDisableExpressAuth") + .build()) + .build(), AddAwsConfigFields.AwsConfigField.builder() .name(ENDPOINT_DISCOVERY_OPTION) .type(ENDPOINT_DISCOVERY_OPTION_TYPE) diff --git a/codegen/smithy-aws-go-codegen/src/main/java/software/amazon/smithy/aws/go/codegen/SdkGoTypes.java b/codegen/smithy-aws-go-codegen/src/main/java/software/amazon/smithy/aws/go/codegen/SdkGoTypes.java index 5ac6fe610e6..9ef37ee873c 100644 --- a/codegen/smithy-aws-go-codegen/src/main/java/software/amazon/smithy/aws/go/codegen/SdkGoTypes.java +++ b/codegen/smithy-aws-go-codegen/src/main/java/software/amazon/smithy/aws/go/codegen/SdkGoTypes.java @@ -57,6 +57,10 @@ public static final class Smithy { } } + public static final class Context { + public static final Symbol SetS3Backend = AwsGoDependency.INTERNAL_CONTEXT.valueSymbol("SetS3Backend"); + } + public static final class Endpoints { public static final Symbol MapFIPSRegion = AwsGoDependency.INTERNAL_ENDPOINTS.valueSymbol("MapFIPSRegion"); } @@ -73,6 +77,10 @@ public static final class V4A { public static final class ServiceCustomizations { public static final class S3 { public static final Symbol SetSignerVersion = AwsCustomGoDependency.S3_CUSTOMIZATION.valueSymbol("SetSignerVersion"); + public static final Symbol ExpressIdentityResolver = AwsCustomGoDependency.S3_CUSTOMIZATION.valueSymbol("ExpressIdentityResolver"); + public static final Symbol ExpressSigner = AwsCustomGoDependency.S3_CUSTOMIZATION.valueSymbol("ExpressSigner"); + public static final Symbol GetPropertiesBackend = AwsCustomGoDependency.S3_CUSTOMIZATION.valueSymbol("GetPropertiesBackend"); + public static final Symbol AddExpressDefaultChecksumMiddleware = AwsCustomGoDependency.S3_CUSTOMIZATION.valueSymbol("AddExpressDefaultChecksumMiddleware"); } public static final class S3Control { diff --git a/codegen/smithy-aws-go-codegen/src/main/java/software/amazon/smithy/aws/go/codegen/customization/S3BucketContext.java b/codegen/smithy-aws-go-codegen/src/main/java/software/amazon/smithy/aws/go/codegen/customization/S3BucketContext.java new file mode 100644 index 00000000000..380b485875d --- /dev/null +++ b/codegen/smithy-aws-go-codegen/src/main/java/software/amazon/smithy/aws/go/codegen/customization/S3BucketContext.java @@ -0,0 +1,29 @@ +package software.amazon.smithy.aws.go.codegen.customization; + +import software.amazon.smithy.go.codegen.SymbolUtils; +import software.amazon.smithy.go.codegen.integration.GoIntegration; +import software.amazon.smithy.go.codegen.integration.MiddlewareRegistrar; +import software.amazon.smithy.go.codegen.integration.RuntimeClientPlugin; +import software.amazon.smithy.utils.ListUtils; + +import java.util.List; + +/** + * Adds the input bucket name to the context for S3 operations, which is required for a variety of custom S3 behaviors. 
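Editor's aside: the addPutBucketContextMiddleware function that this integration registers is generated elsewhere and is not part of this patch. As a rough, hedged sketch only — the bucketKey context key and the hasBucket accessor below are hypothetical stand-ins, not SDK API — a serialize-step middleware that captures the input bucket name on the stack context could look like this in Go:

package main

import (
	"context"

	"github.com/aws/smithy-go/middleware"
)

// bucketKey is a hypothetical context key standing in for the SDK's internal one.
type bucketKey struct{}

type putBucketContext struct{}

func (*putBucketContext) ID() string { return "putBucketContext" }

func (*putBucketContext) HandleSerialize(
	ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler,
) (middleware.SerializeOutput, middleware.Metadata, error) {
	// The real customization reads the Bucket member of each operation input via
	// generated code; a hypothetical accessor interface stands in for that here.
	type hasBucket interface{ GetBucket() (string, bool) }
	if v, ok := in.Parameters.(hasBucket); ok {
		if name, ok := v.GetBucket(); ok {
			ctx = middleware.WithStackValue(ctx, bucketKey{}, name)
		}
	}
	return next.HandleSerialize(ctx, in)
}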
+ */ +public class S3BucketContext implements GoIntegration { + private final MiddlewareRegistrar putBucketContextMiddleware = + MiddlewareRegistrar.builder() + .resolvedFunction(SymbolUtils.createValueSymbolBuilder("addPutBucketContextMiddleware").build()) + .build(); + + @Override + public List<RuntimeClientPlugin> getClientPlugins() { + return ListUtils.of( + RuntimeClientPlugin.builder() + .servicePredicate(S3ModelUtils::isServiceS3) + .registerMiddleware(putBucketContextMiddleware) + .build() + ); + } +} diff --git a/codegen/smithy-aws-go-codegen/src/main/java/software/amazon/smithy/aws/go/codegen/customization/auth/S3ExpressAuthScheme.java b/codegen/smithy-aws-go-codegen/src/main/java/software/amazon/smithy/aws/go/codegen/customization/auth/S3ExpressAuthScheme.java new file mode 100644 index 00000000000..95839530ea5 --- /dev/null +++ b/codegen/smithy-aws-go-codegen/src/main/java/software/amazon/smithy/aws/go/codegen/customization/auth/S3ExpressAuthScheme.java @@ -0,0 +1,161 @@ +/* + * Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.smithy.aws.go.codegen.customization.auth; + +import software.amazon.smithy.aws.go.codegen.SdkGoTypes; +import software.amazon.smithy.aws.go.codegen.customization.S3ModelUtils; +import software.amazon.smithy.codegen.core.SymbolProvider; +import software.amazon.smithy.go.codegen.GoDelegator; +import software.amazon.smithy.go.codegen.GoSettings; +import software.amazon.smithy.go.codegen.GoWriter; +import software.amazon.smithy.go.codegen.SmithyGoTypes; +import software.amazon.smithy.go.codegen.SymbolUtils; +import software.amazon.smithy.go.codegen.integration.AuthSchemeDefinition; +import software.amazon.smithy.go.codegen.integration.ConfigField; +import software.amazon.smithy.go.codegen.integration.ConfigFieldResolver; +import software.amazon.smithy.go.codegen.integration.GoIntegration; +import software.amazon.smithy.go.codegen.integration.ProtocolGenerator; +import software.amazon.smithy.go.codegen.integration.RuntimeClientPlugin; +import software.amazon.smithy.model.Model; +import software.amazon.smithy.model.shapes.OperationShape; +import software.amazon.smithy.model.shapes.ServiceShape; +import software.amazon.smithy.model.traits.AuthTrait; +import software.amazon.smithy.utils.ListUtils; + +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.function.Consumer; + +import static software.amazon.smithy.go.codegen.GoWriter.emptyGoTemplate; +import static software.amazon.smithy.go.codegen.GoWriter.goTemplate; + +public class S3ExpressAuthScheme implements GoIntegration { + private static final ConfigField s3ExpressCredentials = + ConfigField.builder() + .name("ExpressCredentials") + .type(SymbolUtils.createValueSymbolBuilder("ExpressCredentialsProvider").build()) + .documentation("The credentials provider for S3Express requests.") + .build(); + + private static final ConfigFieldResolver s3ExpressCredentialsResolver = + ConfigFieldResolver.builder() +
.location(ConfigFieldResolver.Location.CLIENT) + .target(ConfigFieldResolver.Target.FINALIZATION) + .resolver(SymbolUtils.createValueSymbolBuilder("resolveExpressCredentials").build()) + .build(); + + private static final ConfigFieldResolver s3ExpressCredentialsClientFinalizer = + ConfigFieldResolver.builder() + .location(ConfigFieldResolver.Location.CLIENT) + .target(ConfigFieldResolver.Target.FINALIZATION_WITH_CLIENT) + .resolver(SymbolUtils.createValueSymbolBuilder("finalizeExpressCredentials").build()) + .withClientInput(true) + .build(); + + @Override + public void writeAdditionalFiles( + GoSettings settings, Model model, SymbolProvider symbolProvider, GoDelegator goDelegator + ) { + if (S3ModelUtils.isServiceS3(model, settings.getService(model))) { + goDelegator.useFileWriter("options.go", settings.getModuleName(), generateGetIdentityResolver()); + } + } + + @Override + public List<RuntimeClientPlugin> getClientPlugins() { + return ListUtils.of( + RuntimeClientPlugin.builder() + .servicePredicate(S3ModelUtils::isServiceS3) + .addConfigField(s3ExpressCredentials) + .addConfigFieldResolver(s3ExpressCredentialsResolver) + .addConfigFieldResolver(s3ExpressCredentialsClientFinalizer) + .addAuthSchemeDefinition(SigV4S3ExpressTrait.ID, new SigV4S3Express()) + .build() + ); + } + + @Override + public Model preprocessModel(Model model, GoSettings settings) { + ServiceShape service = settings.getService(model); + if (!S3ModelUtils.isServiceS3(model, service)) { + return model; + } + + AuthTrait newAuth = new AuthTrait(buildSet(s -> { + s.addAll(service.expectTrait(AuthTrait.class).getValueSet()); + s.add(SigV4S3ExpressTrait.ID); + })); + return model.toBuilder() + .addShape( + service.toBuilder() + .addTrait(new SigV4S3ExpressTrait()) + .addTrait(newAuth) + .build() + ) + .build(); + } + + private static <T> Set<T> buildSet(Consumer<Set<T>> builder) { + Set<T> s = new HashSet<>(); + builder.accept(s); + return Collections.unmodifiableSet(s); + } + + private static class SigV4S3Express implements AuthSchemeDefinition { + @Override + public GoWriter.Writable generateServiceOption(ProtocolGenerator.GenerationContext context, ServiceShape service) { + return emptyGoTemplate(); // not modeled + } + + @Override + public GoWriter.Writable generateOperationOption(ProtocolGenerator.GenerationContext context, OperationShape operation) { + return emptyGoTemplate(); // not modeled + } + + @Override + public GoWriter.Writable generateDefaultAuthScheme() { + return goTemplate(""" $T($S, &$T{ Signer: options.HTTPSignerV4, Logger: options.Logger, LogSigning: options.ClientLogMode.IsSigning(), })""", + SdkGoTypes.Internal.Auth.NewHTTPAuthScheme, + SigV4S3ExpressTrait.ID.toString(), + SdkGoTypes.ServiceCustomizations.S3.ExpressSigner); + } + + @Override + public GoWriter.Writable generateOptionsIdentityResolver() { + return goTemplate("getExpressIdentityResolver(o)"); + } + } + + private GoWriter.Writable generateGetIdentityResolver() { + return goTemplate(""" func getExpressIdentityResolver(o Options) $T { if o.ExpressCredentials != nil { return &$T{Provider: o.ExpressCredentials} } return nil } """, + SmithyGoTypes.Auth.IdentityResolver, + SdkGoTypes.ServiceCustomizations.S3.ExpressIdentityResolver); + } +} diff --git a/codegen/smithy-aws-go-codegen/src/main/java/software/amazon/smithy/aws/go/codegen/customization/auth/SigV4S3ExpressTrait.java b/codegen/smithy-aws-go-codegen/src/main/java/software/amazon/smithy/aws/go/codegen/customization/auth/SigV4S3ExpressTrait.java new file mode 100644 index 00000000000..63d0078c7ab
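Editor's aside: the ConfigField above surfaces an ExpressCredentials option on the generated s3.Options. A hedged usage sketch follows; it assumes the generated service package exports an ExpressCredentialsProvider interface for that field (the interface itself is not defined in this patch) and that leaving the field nil lets the resolveExpressCredentials finalizer install the SDK default provider.

package main

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// newDirectoryBucketClient builds an S3 client with a caller-supplied S3 Express
// session-credentials provider in place of the default one.
func newDirectoryBucketClient(cfg aws.Config, creds s3.ExpressCredentialsProvider) *s3.Client {
	return s3.NewFromConfig(cfg, func(o *s3.Options) {
		o.ExpressCredentials = creds
	})
}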
--- /dev/null +++ b/codegen/smithy-aws-go-codegen/src/main/java/software/amazon/smithy/aws/go/codegen/customization/auth/SigV4S3ExpressTrait.java @@ -0,0 +1,41 @@ +/* + * Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.smithy.aws.go.codegen.customization.auth; + +import software.amazon.smithy.model.node.Node; +import software.amazon.smithy.model.shapes.ShapeId; +import software.amazon.smithy.model.traits.AbstractTrait; + +/** + * Synthetic trait for sigv4-s3express auth scheme. + */ +public final class SigV4S3ExpressTrait extends AbstractTrait { + public static final ShapeId ID = ShapeId.from("com.amazonaws.s3#sigv4express"); + + public SigV4S3ExpressTrait() { + super(ID, Node.objectNode()); + } + + @Override + protected Node createNode() { + return Node.objectNode(); + } + + @Override + public boolean isSynthetic() { + return true; + } +} diff --git a/codegen/smithy-aws-go-codegen/src/main/java/software/amazon/smithy/aws/go/codegen/customization/s3/ExpressDefaultChecksum.java b/codegen/smithy-aws-go-codegen/src/main/java/software/amazon/smithy/aws/go/codegen/customization/s3/ExpressDefaultChecksum.java new file mode 100644 index 00000000000..d23337bde5a --- /dev/null +++ b/codegen/smithy-aws-go-codegen/src/main/java/software/amazon/smithy/aws/go/codegen/customization/s3/ExpressDefaultChecksum.java @@ -0,0 +1,74 @@ +package software.amazon.smithy.aws.go.codegen.customization.s3; + +import software.amazon.smithy.aws.go.codegen.SdkGoTypes; +import software.amazon.smithy.aws.go.codegen.customization.S3ModelUtils; +import software.amazon.smithy.aws.traits.HttpChecksumTrait; +import software.amazon.smithy.go.codegen.GoSettings; +import software.amazon.smithy.go.codegen.GoWriter; +import software.amazon.smithy.go.codegen.integration.GoIntegration; +import software.amazon.smithy.go.codegen.integration.MiddlewareRegistrar; +import software.amazon.smithy.go.codegen.integration.RuntimeClientPlugin; +import software.amazon.smithy.model.Model; +import software.amazon.smithy.model.shapes.OperationShape; +import software.amazon.smithy.model.shapes.ServiceShape; +import software.amazon.smithy.utils.ListUtils; + +import java.util.List; + +import static software.amazon.smithy.go.codegen.SymbolUtils.buildPackageSymbol; + +/** + * Defaults the checksum to CRC32 for requests against the Express storage class when a checksum is required but not set + * (we'd normally default to MD5). Defaults to CRC32 for transfer manager operations as well. 
+ */ +public class ExpressDefaultChecksum implements GoIntegration { + private static final MiddlewareRegistrar SET_ALGORITHM_MIDDLEWARE = MiddlewareRegistrar.builder() + .resolvedFunction(SdkGoTypes.ServiceCustomizations.S3.AddExpressDefaultChecksumMiddleware) + .build(); + + private static final MiddlewareRegistrar SET_MPU_ALGORITHM_MIDDLEWARE = MiddlewareRegistrar.builder() + .resolvedFunction(buildPackageSymbol("addSetCreateMPUChecksumAlgorithm")) + .build(); + + private static boolean hasRequiredChecksum(Model model, ServiceShape service, OperationShape operation) { + return operation.hasTrait(HttpChecksumTrait.class) + && operation.expectTrait(HttpChecksumTrait.class).isRequestChecksumRequired(); + } + + private static boolean isCreateMultipartUpload(Model model, ServiceShape service, OperationShape operation) { + return operation.getId().getName().equals("CreateMultipartUpload"); + } + + @Override + public byte getOrder() { + return 127; + } + + @Override + public List<RuntimeClientPlugin> getClientPlugins() { + return ListUtils.of( + RuntimeClientPlugin.builder() + .servicePredicate(S3ModelUtils::isServiceS3) + .operationPredicate(ExpressDefaultChecksum::hasRequiredChecksum) + .registerMiddleware(SET_ALGORITHM_MIDDLEWARE) + .build(), + RuntimeClientPlugin.builder() + .servicePredicate(S3ModelUtils::isServiceS3) + .operationPredicate(ExpressDefaultChecksum::isCreateMultipartUpload) + .registerMiddleware(SET_MPU_ALGORITHM_MIDDLEWARE) + .build() + ); + } + + @Override + public void renderPostEndpointResolutionHook(GoSettings settings, GoWriter writer, Model model) { + if (!S3ModelUtils.isServiceS3(model, settings.getService(model))) return; + + writer.write(""" backend := $T(&endpt.Properties) ctx = $T(ctx, backend) """, + SdkGoTypes.ServiceCustomizations.S3.GetPropertiesBackend, + SdkGoTypes.Internal.Context.SetS3Backend); + } +} diff --git a/codegen/smithy-aws-go-codegen/src/main/resources/META-INF/services/software.amazon.smithy.go.codegen.integration.GoIntegration b/codegen/smithy-aws-go-codegen/src/main/resources/META-INF/services/software.amazon.smithy.go.codegen.integration.GoIntegration index 695579db572..f6917466637 100644 --- a/codegen/smithy-aws-go-codegen/src/main/resources/META-INF/services/software.amazon.smithy.go.codegen.integration.GoIntegration +++ b/codegen/smithy-aws-go-codegen/src/main/resources/META-INF/services/software.amazon.smithy.go.codegen.integration.GoIntegration @@ -69,3 +69,6 @@ software.amazon.smithy.aws.go.codegen.customization.auth.AwsSigV4aAuthScheme software.amazon.smithy.aws.go.codegen.customization.auth.LegacyAuthContextOverride software.amazon.smithy.aws.go.codegen.customization.auth.IgnoreAnonymousCredentials software.amazon.smithy.aws.go.codegen.customization.RemoveDefaults +software.amazon.smithy.aws.go.codegen.customization.auth.S3ExpressAuthScheme +software.amazon.smithy.aws.go.codegen.customization.S3BucketContext +software.amazon.smithy.aws.go.codegen.customization.s3.ExpressDefaultChecksum diff --git a/config/env_config.go b/config/env_config.go index 78bc1493372..fb0615aeb42 100644 --- a/config/env_config.go +++ b/config/env_config.go @@ -74,6 +74,8 @@ const ( awsIgnoreConfiguredEndpoints = "AWS_IGNORE_CONFIGURED_ENDPOINT_URLS" awsEndpointURL = "AWS_ENDPOINT_URL" + + awsS3DisableExpressSessionAuthEnv = "AWS_S3_DISABLE_EXPRESS_SESSION_AUTH" ) var ( @@ -268,6 +270,13 @@ type EnvConfig struct { // Value to contain configured endpoints to be propagated to // corresponding endpoint resolution field.
BaseEndpoint string + + // Whether S3Express auth is disabled. + // + // This will NOT prevent requests from being made to S3Express buckets, it + // will only bypass the modified endpoint routing and signing behaviors + // associated with the feature. + S3DisableExpressAuth *bool } // loadEnvConfig reads configuration values from the OS's environment variables. @@ -356,6 +365,10 @@ func NewEnvConfig() (EnvConfig, error) { return cfg, err } + if err := setBoolPtrFromEnvVal(&cfg.S3DisableExpressAuth, []string{awsS3DisableExpressSessionAuthEnv}); err != nil { + return cfg, err + } + return cfg, nil } @@ -736,3 +749,13 @@ func (c EnvConfig) GetEC2IMDSV1FallbackDisabled() (bool, bool) { return *c.EC2IMDSv1Disabled, true } + +// GetS3DisableExpressAuth returns the configured value for +// [EnvConfig.S3DisableExpressAuth]. +func (c EnvConfig) GetS3DisableExpressAuth() (value, ok bool) { + if c.S3DisableExpressAuth == nil { + return false, false + } + + return *c.S3DisableExpressAuth, true +} diff --git a/config/load_options.go b/config/load_options.go index 7480bb45ed1..ad55eb99490 100644 --- a/config/load_options.go +++ b/config/load_options.go @@ -206,6 +206,9 @@ type LoadOptions struct { // The sdk app ID retrieved from env var or shared config to be added to request user agent header AppID string + + // Whether S3Express auth is disabled. + S3DisableExpressAuth *bool } func (o LoadOptions) getDefaultsMode(ctx context.Context) (aws.DefaultsMode, bool, error) { @@ -1044,3 +1047,22 @@ func WithDefaultsMode(mode aws.DefaultsMode, optFns ...func(options *DefaultsMod return nil } } + +// GetS3DisableExpressAuth returns the configured value for +// [LoadOptions.S3DisableExpressAuth]. +func (o LoadOptions) GetS3DisableExpressAuth() (value, ok bool) { + if o.S3DisableExpressAuth == nil { + return false, false + } + + return *o.S3DisableExpressAuth, true +} + +// WithS3DisableExpressAuth sets [LoadOptions.S3DisableExpressAuth] +// to the value provided. +func WithS3DisableExpressAuth(v bool) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.S3DisableExpressAuth = &v + return nil + } +} diff --git a/config/shared_config.go b/config/shared_config.go index 20683bf5f07..823eafe12b8 100644 --- a/config/shared_config.go +++ b/config/shared_config.go @@ -107,6 +107,8 @@ const ( ignoreConfiguredEndpoints = "ignore_configured_endpoint_urls" endpointURL = "endpoint_url" + + s3DisableExpressSessionAuthKey = "s3_disable_express_session_auth" ) // defaultSharedConfigProfile allows for swapping the default profile for testing @@ -316,6 +318,13 @@ type SharedConfig struct { // Value to contain services section content. Services Services + + // Whether S3Express auth is disabled. + // + // This will NOT prevent requests from being made to S3Express buckets, it + // will only bypass the modified endpoint routing and signing behaviors + // associated with the feature. + S3DisableExpressAuth *bool } func (c SharedConfig) getDefaultsMode(ctx context.Context) (value aws.DefaultsMode, ok bool, err error) { @@ -435,6 +444,16 @@ func (c SharedConfig) GetUseFIPSEndpoint(ctx context.Context) (value aws.FIPSEnd return c.UseFIPSEndpoint, true, nil } +// GetS3DisableExpressAuth returns the configured value for +// [SharedConfig.S3DisableExpressAuth].
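Editor's aside: the same opt-out is now reachable three ways — the AWS_S3_DISABLE_EXPRESS_SESSION_AUTH environment variable, the s3_disable_express_session_auth shared-config key, and the load option shown above. A minimal sketch of the programmatic form, assuming only what this patch adds:

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/config"
)

func main() {
	// Equivalent to exporting AWS_S3_DISABLE_EXPRESS_SESSION_AUTH=true or setting
	// s3_disable_express_session_auth = true in the active shared-config profile.
	cfg, err := config.LoadDefaultConfig(context.Background(),
		config.WithS3DisableExpressAuth(true),
	)
	if err != nil {
		log.Fatal(err)
	}
	_ = cfg // S3 clients built from cfg bypass the S3 Express routing and signing behaviors.
}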
+func (c SharedConfig) GetS3DisableExpressAuth() (value, ok bool) { + if c.S3DisableExpressAuth == nil { + return false, false + } + + return *c.S3DisableExpressAuth, true +} + // GetCustomCABundle returns the custom CA bundle's PEM bytes if the file was func (c SharedConfig) getCustomCABundle(context.Context) (io.Reader, bool, error) { if len(c.CustomCABundle) == 0 { @@ -1054,6 +1073,7 @@ func (c *SharedConfig) setFromIniSection(profile string, section ini.Section) er updateEndpointDiscoveryType(&c.EnableEndpointDiscovery, section, enableEndpointDiscoveryKey) updateBoolPtr(&c.S3UseARNRegion, section, s3UseARNRegionKey) updateBoolPtr(&c.S3DisableMultiRegionAccessPoints, section, s3DisableMultiRegionAccessPointsKey) + updateBoolPtr(&c.S3DisableExpressAuth, section, s3DisableExpressSessionAuthKey) if err := updateEC2MetadataServiceEndpointMode(&c.EC2IMDSEndpointMode, section, ec2MetadataServiceEndpointModeKey); err != nil { return fmt.Errorf("failed to load %s from shared config, %v", ec2MetadataServiceEndpointModeKey, err) diff --git a/feature/s3/manager/upload.go b/feature/s3/manager/upload.go index 598cedbac26..d1be506e06f 100644 --- a/feature/s3/manager/upload.go +++ b/feature/s3/manager/upload.go @@ -13,8 +13,11 @@ import ( "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/internal/awsutil" + internalcontext "github.com/aws/aws-sdk-go-v2/internal/context" "github.com/aws/aws-sdk-go-v2/service/s3" "github.com/aws/aws-sdk-go-v2/service/s3/types" + smithymiddleware "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" ) // MaxUploadParts is the maximum allowed number of parts in a multi-part upload @@ -308,6 +311,9 @@ func (u Uploader) Upload(ctx context.Context, input *s3.PutObjectInput, opts ... clientOptions = append(clientOptions, func(o *s3.Options) { o.APIOptions = append(o.APIOptions, middleware.AddSDKAgentKey(middleware.FeatureMetadata, userAgentKey), + func(s *smithymiddleware.Stack) error { + return s.Finalize.Insert(&setS3ExpressDefaultChecksum{}, "ResolveEndpointV2", smithymiddleware.After) + }, ) }) clientOptions = append(clientOptions, i.cfg.ClientOptions...) @@ -808,3 +814,42 @@ type readerAtSeeker interface { io.ReaderAt io.ReadSeeker } + +// setS3ExpressDefaultChecksum defaults to CRC32 for S3Express buckets, +// which is required when uploading to those through transfer manager. +type setS3ExpressDefaultChecksum struct{} + +func (*setS3ExpressDefaultChecksum) ID() string { + return "setS3ExpressDefaultChecksum" +} + +func (*setS3ExpressDefaultChecksum) HandleFinalize( + ctx context.Context, in smithymiddleware.FinalizeInput, next smithymiddleware.FinalizeHandler, +) ( + out smithymiddleware.FinalizeOutput, metadata smithymiddleware.Metadata, err error, +) { + const checksumHeader = "x-amz-checksum-algorithm" + + if internalcontext.GetS3Backend(ctx) != internalcontext.S3BackendS3Express { + return next.HandleFinalize(ctx, in) + } + + // If this is CreateMultipartUpload we need to ensure the checksum + // algorithm header is present. Otherwise everything is driven off the + // context setting and we can let it flow from there. 
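The setS3ExpressDefaultChecksum middleware above is only attached through the transfer manager's client options, so an ordinary Upload call picks it up automatically; the CreateMultipartUpload special case that the comment introduces continues in the code below. A hedged usage sketch (bucket, key, and body are placeholders):

package example

import (
    "context"
    "strings"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/feature/s3/manager"
    "github.com/aws/aws-sdk-go-v2/service/s3"
)

// upload uses the transfer manager; for directory (S3Express) buckets the
// middleware registered above defaults the checksum algorithm to CRC32 when
// the caller has not chosen one.
func upload(ctx context.Context, client *s3.Client, bucket, key string) error {
    uploader := manager.NewUploader(client)
    _, err := uploader.Upload(ctx, &s3.PutObjectInput{
        Bucket: aws.String(bucket),
        Key:    aws.String(key),
        Body:   strings.NewReader("example body"), // placeholder content
    })
    return err
}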
+ if middleware.GetOperationName(ctx) == "CreateMultipartUpload" { + r, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + if internalcontext.GetChecksumInputAlgorithm(ctx) == "" { + r.Header.Set(checksumHeader, "CRC32") + } + return next.HandleFinalize(ctx, in) + } else if internalcontext.GetChecksumInputAlgorithm(ctx) == "" { + ctx = internalcontext.SetChecksumInputAlgorithm(ctx, string(types.ChecksumAlgorithmCrc32)) + } + + return next.HandleFinalize(ctx, in) +} diff --git a/internal/auth/scheme.go b/internal/auth/scheme.go index ff229c048ff..bbc2ec06ecc 100644 --- a/internal/auth/scheme.go +++ b/internal/auth/scheme.go @@ -16,6 +16,9 @@ const SigV4 = "sigv4" // Authentication Scheme Signature Version 4A const SigV4A = "sigv4a" +// SigV4S3Express identifies the S3 S3Express auth scheme. +const SigV4S3Express = "sigv4-s3express" + // None is a constant representing the // None Authentication Scheme const None = "none" @@ -24,9 +27,10 @@ const None = "none" // that indicates the list of supported AWS // authentication schemes var SupportedSchemes = map[string]bool{ - SigV4: true, - SigV4A: true, - None: true, + SigV4: true, + SigV4A: true, + SigV4S3Express: true, + None: true, } // AuthenticationScheme is a representation of @@ -93,10 +97,11 @@ func GetAuthenticationSchemes(p *smithy.Properties) ([]AuthenticationScheme, err for _, scheme := range authSchemes { authScheme, _ := scheme.(map[string]interface{}) - switch authScheme["name"] { - case SigV4: + version := authScheme["name"].(string) + switch version { + case SigV4, SigV4S3Express: v4Scheme := AuthenticationSchemeV4{ - Name: SigV4, + Name: version, SigningName: getSigningName(authScheme), SigningRegion: getSigningRegion(authScheme), DisableDoubleEncoding: getDisableDoubleEncoding(authScheme), diff --git a/internal/auth/scheme_test.go b/internal/auth/scheme_test.go index 49fc3c94be2..83e1dd6a4f4 100644 --- a/internal/auth/scheme_test.go +++ b/internal/auth/scheme_test.go @@ -73,3 +73,29 @@ func TestV4A(t *testing.T) { } } + +func TestV4S3Express(t *testing.T) { + props := smithy.Properties{} + props.Set("authSchemes", []interface{}{ + map[string]interface{}{ + "name": SigV4S3Express, + "signingName": "s3", + "signingRegion": "us-east-1", + "disableDoubleEncoding": true, + }, + }) + + result, err := GetAuthenticationSchemes(&props) + if err != nil { + t.Fatalf("Did not expect error, got %v", err) + } + + scheme, ok := result[0].(*AuthenticationSchemeV4) + if !ok { + t.Fatalf("Did not get expected AuthenticationSchemeV4. %v", result[0]) + } + + if scheme.Name != SigV4S3Express { + t.Fatalf("expected %s, got %s", SigV4S3Express, scheme.Name) + } +} diff --git a/internal/context/context.go b/internal/context/context.go new file mode 100644 index 00000000000..15bf104772f --- /dev/null +++ b/internal/context/context.go @@ -0,0 +1,39 @@ +package context + +import ( + "context" + + "github.com/aws/smithy-go/middleware" +) + +type s3BackendKey struct{} +type checksumInputAlgorithmKey struct{} + +const ( + // S3BackendS3Express identifies the S3Express backend + S3BackendS3Express = "S3Express" +) + +// SetS3Backend stores the resolved endpoint backend within the request +// context, which is required for a variety of custom S3 behaviors. +func SetS3Backend(ctx context.Context, typ string) context.Context { + return middleware.WithStackValue(ctx, s3BackendKey{}, typ) +} + +// GetS3Backend retrieves the stored endpoint backend within the context. 
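The new internal/context helpers (SetS3Backend above, with the GetS3Backend body continuing below) are thin wrappers over smithy-go's stack-scoped context values. The same pattern can be sketched outside the SDK's internal package; the key and helper names here are illustrative, not part of this change:

package example

import (
    "context"

    "github.com/aws/smithy-go/middleware"
)

// backendKey is an unexported key type, mirroring s3BackendKey in
// internal/context/context.go. (Illustrative only.)
type backendKey struct{}

// setBackend stores a value scoped to the middleware stack invocation.
func setBackend(ctx context.Context, v string) context.Context {
    return middleware.WithStackValue(ctx, backendKey{}, v)
}

// getBackend reads the stack-scoped value back; the zero value is returned
// when nothing was stored.
func getBackend(ctx context.Context) string {
    v, _ := middleware.GetStackValue(ctx, backendKey{}).(string)
    return v
}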
+func GetS3Backend(ctx context.Context) string { + v, _ := middleware.GetStackValue(ctx, s3BackendKey{}).(string) + return v +} + +// SetChecksumInputAlgorithm sets the request checksum algorithm on the +// context. +func SetChecksumInputAlgorithm(ctx context.Context, value string) context.Context { + return middleware.WithStackValue(ctx, checksumInputAlgorithmKey{}, value) +} + +// GetChecksumInputAlgorithm returns the checksum algorithm from the context. +func GetChecksumInputAlgorithm(ctx context.Context) string { + v, _ := middleware.GetStackValue(ctx, checksumInputAlgorithmKey{}).(string) + return v +} diff --git a/service/internal/checksum/middleware_compute_input_checksum.go b/service/internal/checksum/middleware_compute_input_checksum.go index 8943613ca8e..c7740658aea 100644 --- a/service/internal/checksum/middleware_compute_input_checksum.go +++ b/service/internal/checksum/middleware_compute_input_checksum.go @@ -9,6 +9,7 @@ import ( "strconv" v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + internalcontext "github.com/aws/aws-sdk-go-v2/internal/context" presignedurlcust "github.com/aws/aws-sdk-go-v2/service/internal/presigned-url" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" @@ -377,7 +378,7 @@ func (m *addInputChecksumTrailer) HandleFinalize( } func getInputAlgorithm(ctx context.Context) (Algorithm, bool, error) { - ctxAlgorithm := getContextInputAlgorithm(ctx) + ctxAlgorithm := internalcontext.GetChecksumInputAlgorithm(ctx) if ctxAlgorithm == "" { return "", false, nil } diff --git a/service/internal/checksum/middleware_compute_input_checksum_test.go b/service/internal/checksum/middleware_compute_input_checksum_test.go index 2d4cf59d384..3505663e5e1 100644 --- a/service/internal/checksum/middleware_compute_input_checksum_test.go +++ b/service/internal/checksum/middleware_compute_input_checksum_test.go @@ -16,6 +16,7 @@ import ( "testing/iotest" v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + internalcontext "github.com/aws/aws-sdk-go-v2/internal/context" "github.com/aws/smithy-go/logging" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" @@ -49,7 +50,7 @@ func TestComputeInputPayloadChecksum(t *testing.T) { "no op": { "checksum header set known length": { initContext: func(ctx context.Context) context.Context { - return setContextInputAlgorithm(ctx, string(AlgorithmCRC32)) + return internalcontext.SetChecksumInputAlgorithm(ctx, string(AlgorithmCRC32)) }, buildInput: middleware.BuildInput{ Request: func() *smithyhttp.Request { @@ -72,7 +73,7 @@ func TestComputeInputPayloadChecksum(t *testing.T) { }, "checksum header set unknown length": { initContext: func(ctx context.Context) context.Context { - return setContextInputAlgorithm(ctx, string(AlgorithmCRC32)) + return internalcontext.SetChecksumInputAlgorithm(ctx, string(AlgorithmCRC32)) }, buildInput: middleware.BuildInput{ Request: func() *smithyhttp.Request { @@ -126,7 +127,7 @@ func TestComputeInputPayloadChecksum(t *testing.T) { "build handled": { "http nil stream": { initContext: func(ctx context.Context) context.Context { - return setContextInputAlgorithm(ctx, string(AlgorithmCRC32)) + return internalcontext.SetChecksumInputAlgorithm(ctx, string(AlgorithmCRC32)) }, buildInput: middleware.BuildInput{ Request: func() *smithyhttp.Request { @@ -146,7 +147,7 @@ func TestComputeInputPayloadChecksum(t *testing.T) { }, "http empty stream": { initContext: func(ctx context.Context) context.Context { - return setContextInputAlgorithm(ctx, 
string(AlgorithmCRC32)) + return internalcontext.SetChecksumInputAlgorithm(ctx, string(AlgorithmCRC32)) }, buildInput: middleware.BuildInput{ Request: func() *smithyhttp.Request { @@ -169,7 +170,7 @@ func TestComputeInputPayloadChecksum(t *testing.T) { }, "https empty stream unseekable": { initContext: func(ctx context.Context) context.Context { - return setContextInputAlgorithm(ctx, string(AlgorithmCRC32)) + return internalcontext.SetChecksumInputAlgorithm(ctx, string(AlgorithmCRC32)) }, buildInput: middleware.BuildInput{ Request: func() *smithyhttp.Request { @@ -192,7 +193,7 @@ func TestComputeInputPayloadChecksum(t *testing.T) { }, "http empty stream unseekable": { initContext: func(ctx context.Context) context.Context { - return setContextInputAlgorithm(ctx, string(AlgorithmCRC32)) + return internalcontext.SetChecksumInputAlgorithm(ctx, string(AlgorithmCRC32)) }, buildInput: middleware.BuildInput{ Request: func() *smithyhttp.Request { @@ -215,7 +216,7 @@ func TestComputeInputPayloadChecksum(t *testing.T) { }, "https nil stream": { initContext: func(ctx context.Context) context.Context { - return setContextInputAlgorithm(ctx, string(AlgorithmCRC32)) + return internalcontext.SetChecksumInputAlgorithm(ctx, string(AlgorithmCRC32)) }, buildInput: middleware.BuildInput{ Request: func() *smithyhttp.Request { @@ -234,7 +235,7 @@ func TestComputeInputPayloadChecksum(t *testing.T) { }, "https empty stream": { initContext: func(ctx context.Context) context.Context { - return setContextInputAlgorithm(ctx, string(AlgorithmCRC32)) + return internalcontext.SetChecksumInputAlgorithm(ctx, string(AlgorithmCRC32)) }, buildInput: middleware.BuildInput{ Request: func() *smithyhttp.Request { @@ -323,7 +324,7 @@ func TestComputeInputPayloadChecksum(t *testing.T) { }, "http seekable": { initContext: func(ctx context.Context) context.Context { - return setContextInputAlgorithm(ctx, string(AlgorithmCRC32)) + return internalcontext.SetChecksumInputAlgorithm(ctx, string(AlgorithmCRC32)) }, buildInput: middleware.BuildInput{ Request: func() *smithyhttp.Request { @@ -346,7 +347,7 @@ func TestComputeInputPayloadChecksum(t *testing.T) { }, "http payload hash already set": { initContext: func(ctx context.Context) context.Context { - ctx = setContextInputAlgorithm(ctx, string(AlgorithmCRC32)) + ctx = internalcontext.SetChecksumInputAlgorithm(ctx, string(AlgorithmCRC32)) ctx = v4.SetPayloadHash(ctx, "somehash") return ctx }, @@ -371,7 +372,7 @@ func TestComputeInputPayloadChecksum(t *testing.T) { }, "http seekable checksum matches payload hash": { initContext: func(ctx context.Context) context.Context { - return setContextInputAlgorithm(ctx, string(AlgorithmSHA256)) + return internalcontext.SetChecksumInputAlgorithm(ctx, string(AlgorithmSHA256)) }, buildInput: middleware.BuildInput{ Request: func() *smithyhttp.Request { @@ -394,7 +395,7 @@ func TestComputeInputPayloadChecksum(t *testing.T) { }, "http payload hash disabled": { initContext: func(ctx context.Context) context.Context { - return setContextInputAlgorithm(ctx, string(AlgorithmCRC32)) + return internalcontext.SetChecksumInputAlgorithm(ctx, string(AlgorithmCRC32)) }, optionsFn: func(o *computeInputPayloadChecksum) { o.EnableComputePayloadHash = false @@ -422,7 +423,7 @@ func TestComputeInputPayloadChecksum(t *testing.T) { o.EnableTrailingChecksum = false }, initContext: func(ctx context.Context) context.Context { - return setContextInputAlgorithm(ctx, string(AlgorithmCRC32)) + return internalcontext.SetChecksumInputAlgorithm(ctx, string(AlgorithmCRC32)) }, 
buildInput: middleware.BuildInput{ Request: func() *smithyhttp.Request { @@ -447,7 +448,7 @@ func TestComputeInputPayloadChecksum(t *testing.T) { o.EnableTrailingChecksum = false }, initContext: func(ctx context.Context) context.Context { - return setContextInputAlgorithm(ctx, string(AlgorithmCRC32)) + return internalcontext.SetChecksumInputAlgorithm(ctx, string(AlgorithmCRC32)) }, buildInput: middleware.BuildInput{ Request: func() *smithyhttp.Request { @@ -474,7 +475,7 @@ func TestComputeInputPayloadChecksum(t *testing.T) { "build error": { "unknown algorithm": { initContext: func(ctx context.Context) context.Context { - return setContextInputAlgorithm(ctx, string("unknown")) + return internalcontext.SetChecksumInputAlgorithm(ctx, string("unknown")) }, buildInput: middleware.BuildInput{ Request: func() *smithyhttp.Request { @@ -504,7 +505,7 @@ func TestComputeInputPayloadChecksum(t *testing.T) { }, "http unseekable stream": { initContext: func(ctx context.Context) context.Context { - return setContextInputAlgorithm(ctx, string(AlgorithmCRC32)) + return internalcontext.SetChecksumInputAlgorithm(ctx, string(AlgorithmCRC32)) }, buildInput: middleware.BuildInput{ Request: func() *smithyhttp.Request { @@ -519,7 +520,7 @@ func TestComputeInputPayloadChecksum(t *testing.T) { }, "http stream read error": { initContext: func(ctx context.Context) context.Context { - return setContextInputAlgorithm(ctx, string(AlgorithmCRC32)) + return internalcontext.SetChecksumInputAlgorithm(ctx, string(AlgorithmCRC32)) }, buildInput: middleware.BuildInput{ Request: func() *smithyhttp.Request { @@ -537,7 +538,7 @@ func TestComputeInputPayloadChecksum(t *testing.T) { }, "http stream rewind error": { initContext: func(ctx context.Context) context.Context { - return setContextInputAlgorithm(ctx, string(AlgorithmCRC32)) + return internalcontext.SetChecksumInputAlgorithm(ctx, string(AlgorithmCRC32)) }, buildInput: middleware.BuildInput{ Request: func() *smithyhttp.Request { @@ -558,7 +559,7 @@ func TestComputeInputPayloadChecksum(t *testing.T) { o.EnableTrailingChecksum = false }, initContext: func(ctx context.Context) context.Context { - return setContextInputAlgorithm(ctx, string(AlgorithmCRC32)) + return internalcontext.SetChecksumInputAlgorithm(ctx, string(AlgorithmCRC32)) }, buildInput: middleware.BuildInput{ Request: func() *smithyhttp.Request { @@ -576,7 +577,7 @@ func TestComputeInputPayloadChecksum(t *testing.T) { "finalize handled": { "https unseekable": { initContext: func(ctx context.Context) context.Context { - return setContextInputAlgorithm(ctx, string(AlgorithmCRC32)) + return internalcontext.SetChecksumInputAlgorithm(ctx, string(AlgorithmCRC32)) }, buildInput: middleware.BuildInput{ Request: func() *smithyhttp.Request { @@ -602,7 +603,7 @@ func TestComputeInputPayloadChecksum(t *testing.T) { }, "https unseekable unknown length": { initContext: func(ctx context.Context) context.Context { - return setContextInputAlgorithm(ctx, string(AlgorithmCRC32)) + return internalcontext.SetChecksumInputAlgorithm(ctx, string(AlgorithmCRC32)) }, buildInput: middleware.BuildInput{ Request: func() *smithyhttp.Request { @@ -627,7 +628,7 @@ func TestComputeInputPayloadChecksum(t *testing.T) { }, "https seekable": { initContext: func(ctx context.Context) context.Context { - return setContextInputAlgorithm(ctx, string(AlgorithmCRC32)) + return internalcontext.SetChecksumInputAlgorithm(ctx, string(AlgorithmCRC32)) }, buildInput: middleware.BuildInput{ Request: func() *smithyhttp.Request { @@ -653,7 +654,7 @@ func 
TestComputeInputPayloadChecksum(t *testing.T) { }, "https seekable unknown length": { initContext: func(ctx context.Context) context.Context { - return setContextInputAlgorithm(ctx, string(AlgorithmCRC32)) + return internalcontext.SetChecksumInputAlgorithm(ctx, string(AlgorithmCRC32)) }, buildInput: middleware.BuildInput{ Request: func() *smithyhttp.Request { @@ -679,7 +680,7 @@ func TestComputeInputPayloadChecksum(t *testing.T) { }, "https no compute payload hash": { initContext: func(ctx context.Context) context.Context { - return setContextInputAlgorithm(ctx, string(AlgorithmCRC32)) + return internalcontext.SetChecksumInputAlgorithm(ctx, string(AlgorithmCRC32)) }, optionsFn: func(o *computeInputPayloadChecksum) { o.EnableComputePayloadHash = false @@ -707,7 +708,7 @@ func TestComputeInputPayloadChecksum(t *testing.T) { }, "https no decode content length": { initContext: func(ctx context.Context) context.Context { - return setContextInputAlgorithm(ctx, string(AlgorithmCRC32)) + return internalcontext.SetChecksumInputAlgorithm(ctx, string(AlgorithmCRC32)) }, optionsFn: func(o *computeInputPayloadChecksum) { o.EnableDecodedContentLengthHeader = false @@ -735,7 +736,7 @@ func TestComputeInputPayloadChecksum(t *testing.T) { }, "with content encoding set": { initContext: func(ctx context.Context) context.Context { - return setContextInputAlgorithm(ctx, string(AlgorithmCRC32)) + return internalcontext.SetChecksumInputAlgorithm(ctx, string(AlgorithmCRC32)) }, buildInput: middleware.BuildInput{ Request: func() *smithyhttp.Request { diff --git a/service/internal/checksum/middleware_setup_context.go b/service/internal/checksum/middleware_setup_context.go index f7295254976..3db73afe7e8 100644 --- a/service/internal/checksum/middleware_setup_context.go +++ b/service/internal/checksum/middleware_setup_context.go @@ -3,6 +3,7 @@ package checksum import ( "context" + internalcontext "github.com/aws/aws-sdk-go-v2/internal/context" "github.com/aws/smithy-go/middleware" ) @@ -35,33 +36,13 @@ func (m *setupInputContext) HandleInitialize( // check is input resource has a checksum algorithm algorithm, ok := m.GetAlgorithm(in.Parameters) if ok && len(algorithm) != 0 { - ctx = setContextInputAlgorithm(ctx, algorithm) + ctx = internalcontext.SetChecksumInputAlgorithm(ctx, algorithm) } } return next.HandleInitialize(ctx, in) } -// inputAlgorithmKey is the key set on context used to identify, retrieves the -// request checksum algorithm if present on the context. -type inputAlgorithmKey struct{} - -// setContextInputAlgorithm sets the request checksum algorithm on the context. -// -// Scoped to stack values. -func setContextInputAlgorithm(ctx context.Context, value string) context.Context { - return middleware.WithStackValue(ctx, inputAlgorithmKey{}, value) -} - -// getContextInputAlgorithm returns the checksum algorithm from the context if -// one was specified. Empty string is returned if one is not specified. -// -// Scoped to stack values. -func getContextInputAlgorithm(ctx context.Context) (v string) { - v, _ = middleware.GetStackValue(ctx, inputAlgorithmKey{}).(string) - return v -} - type setupOutputContext struct { // GetValidationMode is a function to get the checksum validation // mode of the output payload from the input parameters. 
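With the local context helpers removed above, setupInputContext now writes the algorithm through the shared internal/context package and getInputAlgorithm reads it back during checksum computation. From a caller's point of view nothing changes: the algorithm still comes from the operation input. A hedged sketch (bucket, key, and body are placeholders):

package example

import (
    "context"
    "strings"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/s3"
    "github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// putWithChecksum uploads a small object and asks the SDK to compute a CRC32
// request checksum; setupInputContext copies the algorithm into the stack
// context and computeInputPayloadChecksum does the rest.
func putWithChecksum(ctx context.Context, client *s3.Client) error {
    _, err := client.PutObject(ctx, &s3.PutObjectInput{
        Bucket:            aws.String("amzn-s3-demo-bucket"), // placeholder
        Key:               aws.String("example-key"),         // placeholder
        Body:              strings.NewReader("hello"),
        ChecksumAlgorithm: types.ChecksumAlgorithmCrc32,
    })
    return err
}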
diff --git a/service/internal/checksum/middleware_setup_context_test.go b/service/internal/checksum/middleware_setup_context_test.go index 3235983bad4..e629ee088d7 100644 --- a/service/internal/checksum/middleware_setup_context_test.go +++ b/service/internal/checksum/middleware_setup_context_test.go @@ -7,6 +7,7 @@ import ( "context" "testing" + internalcontext "github.com/aws/aws-sdk-go-v2/internal/context" "github.com/aws/smithy-go/middleware" ) @@ -60,7 +61,7 @@ func TestSetupInput(t *testing.T) { func(ctx context.Context, input middleware.InitializeInput) ( out middleware.InitializeOutput, metadata middleware.Metadata, err error, ) { - v := getContextInputAlgorithm(ctx) + v := internalcontext.GetChecksumInputAlgorithm(ctx) if e, a := c.expectValue, v; e != a { t.Errorf("expect value %v, got %v", e, a) } diff --git a/service/s3/api_client.go b/service/s3/api_client.go index fc0a556211f..6e8e461738e 100644 --- a/service/s3/api_client.go +++ b/service/s3/api_client.go @@ -70,6 +70,8 @@ func New(options Options, optFns ...func(*Options)) *Client { ignoreAnonymousAuth(&options) + resolveExpressCredentials(&options) + finalizeServiceEndpointAuthResolver(&options) resolveAuthSchemes(&options) @@ -78,6 +80,8 @@ func New(options Options, optFns ...func(*Options)) *Client { options: options, } + finalizeExpressCredentials(&options, client) + return client } @@ -177,6 +181,11 @@ func resolveAuthSchemes(options *Options) { Logger: options.Logger, LogSigning: options.ClientLogMode.IsSigning(), }), + internalauth.NewHTTPAuthScheme("com.amazonaws.s3#sigv4express", &s3cust.ExpressSigner{ + Signer: options.HTTPSignerV4, + Logger: options.Logger, + LogSigning: options.ClientLogMode.IsSigning(), + }), internalauth.NewHTTPAuthScheme("aws.auth#sigv4a", &v4a.SignerAdapter{ Signer: options.httpSignerV4a, Logger: options.Logger, @@ -257,6 +266,7 @@ func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client { resolveAWSEndpointResolver(cfg, &opts) resolveUseARNRegion(cfg, &opts) resolveDisableMultiRegionAccessPoints(cfg, &opts) + resolveDisableExpressAuth(cfg, &opts) resolveUseDualStackEndpoint(cfg, &opts) resolveUseFIPSEndpoint(cfg, &opts) resolveBaseEndpoint(cfg, &opts) @@ -721,7 +731,7 @@ func (m *presignContextPolyfillMiddleware) HandleFinalize(ctx context.Context, i schemeID := rscheme.Scheme.SchemeID() ctx = s3cust.SetSignerVersion(ctx, schemeID) - if schemeID == "aws.auth#sigv4" { + if schemeID == "aws.auth#sigv4" || schemeID == "com.amazonaws.s3#sigv4express" { if sn, ok := smithyhttp.GetSigV4SigningName(&rscheme.SignerProperties); ok { ctx = awsmiddleware.SetSigningName(ctx, sn) } @@ -765,9 +775,10 @@ func (c presignConverter) convertToPresignMiddleware(stack *middleware.Stack, op return err } - // add multi-region access point presigner + // extended s3 presigning signermv := s3cust.NewPresignHTTPRequestMiddleware(s3cust.PresignHTTPRequestMiddlewareOptions{ CredentialsProvider: options.Credentials, + ExpressCredentials: options.ExpressCredentials, V4Presigner: c.Presigner, V4aPresigner: c.presignerV4a, LogSigning: options.ClientLogMode.IsSigning(), diff --git a/service/s3/api_op_AbortMultipartUpload.go b/service/s3/api_op_AbortMultipartUpload.go index b37cdb56d62..d1e7dceaee2 100644 --- a/service/s3/api_op_AbortMultipartUpload.go +++ b/service/s3/api_op_AbortMultipartUpload.go @@ -13,18 +13,40 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This action aborts a multipart upload. 
After a multipart upload is aborted, no -// additional parts can be uploaded using that upload ID. The storage consumed by -// any previously uploaded parts will be freed. However, if any part uploads are +// This operation aborts a multipart upload. After a multipart upload is aborted, +// no additional parts can be uploaded using that upload ID. The storage consumed +// by any previously uploaded parts will be freed. However, if any part uploads are // currently in progress, those part uploads might or might not succeed. As a // result, it might be necessary to abort a given multipart upload multiple times // in order to completely free all storage consumed by all parts. To verify that -// all parts have been removed, so you don't get charged for the part storage, you -// should call the ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) -// action and ensure that the parts list is empty. For information about -// permissions required to use the multipart upload, see Multipart Upload and -// Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) -// . The following operations are related to AbortMultipartUpload : +// all parts have been removed and prevent getting charged for the part storage, +// you should call the ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) +// API operation and ensure that the parts list is empty. Directory buckets - For +// directory buckets, you must make requests for this API operation to the Zonal +// endpoint. These endpoints support virtual-hosted-style requests in the format +// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style +// requests are not supported. For more information, see Regional and Zonal +// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. Permissions +// - General purpose bucket permissions - For information about permissions +// required to use the multipart upload, see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) +// in the Amazon S3 User Guide. +// - Directory bucket permissions - To grant access to this API operation on a +// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// API operation for session-based authorization. Specifically, you grant the +// s3express:CreateSession permission to the directory bucket in a bucket policy +// or an IAM identity-based policy. Then, you make the CreateSession API call on +// the bucket to obtain a session token. With the session token in your request +// header, you can make API requests to this operation. After the session token +// expires, you make another CreateSession API call to generate a new session +// token for use. Amazon Web Services CLI or SDKs create session and refresh the +// session token automatically to avoid service interruptions when a session +// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// . +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket_name.s3express-az_id.region.amazonaws.com . 
The following operations are +// related to AbortMultipartUpload : // - CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) // - UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) // - CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) @@ -47,16 +69,26 @@ func (c *Client) AbortMultipartUpload(ctx context.Context, params *AbortMultipar type AbortMultipartUploadInput struct { - // The bucket name to which the upload was taking place. When using this action - // with an access point, you must direct requests to the access point hostname. The - // access point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. When you use this action with Amazon S3 on - // Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on - // Outposts hostname takes the form + // The bucket name to which the upload was taking place. Directory buckets - When + // you use this operation with a directory bucket, you must use + // virtual-hosted-style requests in the format + // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not + // supported. Directory bucket names must be unique in the chosen Availability + // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for + // example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 ). For information about bucket + // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. Access points - When you use this action with an + // access point, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When using the access point ARN, + // you must direct requests to the access point hostname. The access point hostname + // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. Access points and Object Lambda access points are + // not supported by directory buckets. S3 on Outposts - When you use this action + // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts + // hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you // use this action with S3 on Outposts through the Amazon Web Services SDKs, you // provide the Outposts access point ARN in place of the bucket name. For more @@ -76,18 +108,19 @@ type AbortMultipartUploadInput struct { // This member is required. UploadId *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). 
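The AbortMultipartUpload documentation above suggests calling ListParts afterwards and confirming the parts list is empty, so no part storage is still billed. A hedged sketch of that abort-then-verify pattern (bucket, key, and upload ID come from the caller):

package example

import (
    "context"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/s3"
)

// abortAndVerify aborts a multipart upload and then checks, as the
// documentation recommends, that no parts remain for the upload ID.
func abortAndVerify(ctx context.Context, client *s3.Client, bucket, key, uploadID string) (bool, error) {
    if _, err := client.AbortMultipartUpload(ctx, &s3.AbortMultipartUploadInput{
        Bucket:   aws.String(bucket),
        Key:      aws.String(key),
        UploadId: aws.String(uploadID),
    }); err != nil {
        return false, err
    }

    parts, err := client.ListParts(ctx, &s3.ListPartsInput{
        Bucket:   aws.String(bucket),
        Key:      aws.String(key),
        UploadId: aws.String(uploadID),
    })
    if err != nil {
        // A NoSuchUpload error here typically means the upload is already gone.
        return false, err
    }
    return len(parts.Parts) == 0, nil
}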
+ // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string // Confirms that the requester knows that they will be charged for the request. // Bucket owners need not specify this parameter in their requests. If either the - // source or destination Amazon S3 bucket has Requester Pays enabled, the requester - // will pay for corresponding charges to copy the object. For information about + // source or destination S3 bucket has Requester Pays enabled, the requester will + // pay for corresponding charges to copy the object. For information about // downloading objects from Requester Pays buckets, see Downloading Objects in // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. RequestPayer types.RequestPayer noSmithyDocumentSerde @@ -102,7 +135,7 @@ func (in *AbortMultipartUploadInput) bindEndpointParams(p *EndpointParameters) { type AbortMultipartUploadOutput struct { // If present, indicates that the requester was successfully charged for the - // request. + // request. This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // Metadata pertaining to the operation's result. @@ -166,6 +199,9 @@ func (c *Client) addOperationAbortMultipartUploadMiddlewares(stack *middleware.S if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = addOpAbortMultipartUploadValidationMiddleware(stack); err != nil { return err } diff --git a/service/s3/api_op_CompleteMultipartUpload.go b/service/s3/api_op_CompleteMultipartUpload.go index a50dcf6dbe2..b9f094f1e10 100644 --- a/service/s3/api_op_CompleteMultipartUpload.go +++ b/service/s3/api_op_CompleteMultipartUpload.go @@ -15,54 +15,81 @@ import ( // Completes a multipart upload by assembling previously uploaded parts. You first // initiate the multipart upload and then upload all parts using the UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// operation or the UploadPartCopy (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html) // operation. After successfully uploading all relevant parts of an upload, you -// call this action to complete the upload. Upon receiving this request, Amazon S3 -// concatenates all the parts in ascending order by part number to create a new -// object. In the Complete Multipart Upload request, you must provide the parts -// list. You must ensure that the parts list is complete. This action concatenates -// the parts that you provide in the list. For each part in the list, you must -// provide the part number and the ETag value, returned after that part was -// uploaded. Processing of a Complete Multipart Upload request could take several -// minutes to complete. After Amazon S3 begins processing the request, it sends an -// HTTP response header that specifies a 200 OK response. While processing is in -// progress, Amazon S3 periodically sends white space characters to keep the -// connection from timing out. A request could fail after the initial 200 OK -// response has been sent. 
This means that a 200 OK response can contain either a -// success or an error. If you call the S3 API directly, make sure to design your -// application to parse the contents of the response and handle it appropriately. -// If you use Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect -// the embedded error and apply error handling per your configuration settings -// (including automatically retrying the request as appropriate). If the condition -// persists, the SDKs throws an exception (or, for the SDKs that don't use -// exceptions, they return the error). Note that if CompleteMultipartUpload fails, -// applications should be prepared to retry the failed requests. For more -// information, see Amazon S3 Error Best Practices (https://docs.aws.amazon.com/AmazonS3/latest/dev/ErrorBestPractices.html) -// . You cannot use Content-Type: application/x-www-form-urlencoded with Complete -// Multipart Upload requests. Also, if you do not provide a Content-Type header, -// CompleteMultipartUpload returns a 200 OK response. For more information about -// multipart uploads, see Uploading Objects Using Multipart Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html) -// . For information about permissions required to use the multipart upload API, -// see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) -// . CompleteMultipartUpload has the following special errors: -// - Error code: EntityTooSmall +// call this CompleteMultipartUpload operation to complete the upload. Upon +// receiving this request, Amazon S3 concatenates all the parts in ascending order +// by part number to create a new object. In the CompleteMultipartUpload request, +// you must provide the parts list and ensure that the parts list is complete. The +// CompleteMultipartUpload API operation concatenates the parts that you provide in +// the list. For each part in the list, you must provide the PartNumber value and +// the ETag value that are returned after that part was uploaded. The processing +// of a CompleteMultipartUpload request could take several minutes to finalize. +// After Amazon S3 begins processing the request, it sends an HTTP response header +// that specifies a 200 OK response. While processing is in progress, Amazon S3 +// periodically sends white space characters to keep the connection from timing +// out. A request could fail after the initial 200 OK response has been sent. This +// means that a 200 OK response can contain either a success or an error. The +// error response might be embedded in the 200 OK response. If you call this API +// operation directly, make sure to design your application to parse the contents +// of the response and handle it appropriately. If you use Amazon Web Services +// SDKs, SDKs handle this condition. The SDKs detect the embedded error and apply +// error handling per your configuration settings (including automatically retrying +// the request as appropriate). If the condition persists, the SDKs throw an +// exception (or, for the SDKs that don't use exceptions, they return an error). +// Note that if CompleteMultipartUpload fails, applications should be prepared to +// retry the failed requests. For more information, see Amazon S3 Error Best +// Practices (https://docs.aws.amazon.com/AmazonS3/latest/dev/ErrorBestPractices.html) +// . You can't use Content-Type: application/x-www-form-urlencoded for the +// CompleteMultipartUpload requests. 
Also, if you don't provide a Content-Type +// header, CompleteMultipartUpload can still return a 200 OK response. For more +// information about multipart uploads, see Uploading Objects Using Multipart +// Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html) +// in the Amazon S3 User Guide. Directory buckets - For directory buckets, you must +// make requests for this API operation to the Zonal endpoint. These endpoints +// support virtual-hosted-style requests in the format +// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style +// requests are not supported. For more information, see Regional and Zonal +// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. Permissions +// - General purpose bucket permissions - For information about permissions +// required to use the multipart upload API, see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) +// in the Amazon S3 User Guide. +// - Directory bucket permissions - To grant access to this API operation on a +// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// API operation for session-based authorization. Specifically, you grant the +// s3express:CreateSession permission to the directory bucket in a bucket policy +// or an IAM identity-based policy. Then, you make the CreateSession API call on +// the bucket to obtain a session token. With the session token in your request +// header, you can make API requests to this operation. After the session token +// expires, you make another CreateSession API call to generate a new session +// token for use. Amazon Web Services CLI or SDKs create session and refresh the +// session token automatically to avoid service interruptions when a session +// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// . +// +// Special errors +// - Error Code: EntityTooSmall // - Description: Your proposed upload is smaller than the minimum allowed // object size. Each part must be at least 5 MB in size, except the last part. -// - 400 Bad Request -// - Error code: InvalidPart +// - HTTP Status Code: 400 Bad Request +// - Error Code: InvalidPart // - Description: One or more of the specified parts could not be found. The -// part might not have been uploaded, or the specified entity tag might not have -// matched the part's entity tag. -// - 400 Bad Request -// - Error code: InvalidPartOrder +// part might not have been uploaded, or the specified ETag might not have matched +// the uploaded part's ETag. +// - HTTP Status Code: 400 Bad Request +// - Error Code: InvalidPartOrder // - Description: The list of parts was not in ascending order. The parts list // must be specified in order by part number. -// - 400 Bad Request -// - Error code: NoSuchUpload +// - HTTP Status Code: 400 Bad Request +// - Error Code: NoSuchUpload // - Description: The specified multipart upload does not exist. The upload ID // might be invalid, or the multipart upload might have been aborted or completed. -// - 404 Not Found +// - HTTP Status Code: 404 Not Found // -// The following operations are related to CompleteMultipartUpload : +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket_name.s3express-az_id.region.amazonaws.com . 
The following operations are +// related to CompleteMultipartUpload : // - CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) // - UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) // - AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) @@ -85,16 +112,26 @@ func (c *Client) CompleteMultipartUpload(ctx context.Context, params *CompleteMu type CompleteMultipartUploadInput struct { - // Name of the bucket to which the multipart upload was initiated. When using this - // action with an access point, you must direct requests to the access point - // hostname. The access point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. When you use this action with Amazon S3 on - // Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on - // Outposts hostname takes the form + // Name of the bucket to which the multipart upload was initiated. Directory + // buckets - When you use this operation with a directory bucket, you must use + // virtual-hosted-style requests in the format + // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not + // supported. Directory bucket names must be unique in the chosen Availability + // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for + // example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 ). For information about bucket + // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. Access points - When you use this action with an + // access point, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When using the access point ARN, + // you must direct requests to the access point hostname. The access point hostname + // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. Access points and Object Lambda access points are + // not supported by directory buckets. S3 on Outposts - When you use this action + // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts + // hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you // use this action with S3 on Outposts through the Amazon Web Services SDKs, you // provide the Outposts access point ARN in place of the bucket name. For more @@ -142,9 +179,9 @@ type CompleteMultipartUploadInput struct { // in the Amazon S3 User Guide. ChecksumSHA256 *string - // The account ID of the expected bucket owner. 
If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string // The container for the multipart upload request information. @@ -152,30 +189,34 @@ type CompleteMultipartUploadInput struct { // Confirms that the requester knows that they will be charged for the request. // Bucket owners need not specify this parameter in their requests. If either the - // source or destination Amazon S3 bucket has Requester Pays enabled, the requester - // will pay for corresponding charges to copy the object. For information about + // source or destination S3 bucket has Requester Pays enabled, the requester will + // pay for corresponding charges to copy the object. For information about // downloading objects from Requester Pays buckets, see Downloading Objects in // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. RequestPayer types.RequestPayer // The server-side encryption (SSE) algorithm used to encrypt the object. This // parameter is required only when the object was created using a checksum // algorithm or if your bucket policy requires the use of SSE-C. For more // information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html#ssec-require-condition-key) - // in the Amazon S3 User Guide. + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. SSECustomerAlgorithm *string // The server-side encryption (SSE) customer managed key. This parameter is needed // only when the object was created using a checksum algorithm. For more // information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) - // in the Amazon S3 User Guide. + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. SSECustomerKey *string // The MD5 server-side encryption (SSE) customer managed key. This parameter is // needed only when the object was created using a checksum algorithm. For more // information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) - // in the Amazon S3 User Guide. + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. SSECustomerKeyMD5 *string noSmithyDocumentSerde @@ -190,52 +231,48 @@ func (in *CompleteMultipartUploadInput) bindEndpointParams(p *EndpointParameters type CompleteMultipartUploadOutput struct { // The name of the bucket that contains the newly created object. Does not return - // the access point ARN or access point alias if used. When using this action with - // an access point, you must direct requests to the access point hostname. The - // access point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. 
For more information about - // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. When you use this action with Amazon S3 on - // Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on - // Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you - // use this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // the access point ARN or access point alias if used. Access points are not + // supported by directory buckets. Bucket *string // Indicates whether the multipart upload uses an S3 Bucket Key for server-side - // encryption with Key Management Service (KMS) keys (SSE-KMS). + // encryption with Key Management Service (KMS) keys (SSE-KMS). This functionality + // is not supported for directory buckets. BucketKeyEnabled *bool // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this may not - // be a checksum value of the object. For more information about how checksums are - // calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use an API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. Instead, it's a calculation based on the + // checksum values of each individual part. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumCRC32 *string // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this may not - // be a checksum value of the object. For more information about how checksums are - // calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use an API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. Instead, it's a calculation based on the + // checksum values of each individual part. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumCRC32C *string // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this may not - // be a checksum value of the object. 
For more information about how checksums are - // calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use the API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. Instead, it's a calculation based on the + // checksum values of each individual part. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumSHA1 *string // The base64-encoded, 256-bit SHA-256 digest of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this may not - // be a checksum value of the object. For more information about how checksums are - // calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use an API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. Instead, it's a calculation based on the + // checksum values of each individual part. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumSHA256 *string @@ -251,6 +288,7 @@ type CompleteMultipartUploadOutput struct { // If the object expiration is configured, this will contain the expiration date ( // expiry-date ) and rule ID ( rule-id ). The value of rule-id is URL-encoded. + // This functionality is not supported for directory buckets. Expiration *string // The object key of the newly created object. @@ -260,19 +298,21 @@ type CompleteMultipartUploadOutput struct { Location *string // If present, indicates that the requester was successfully charged for the - // request. + // request. This functionality is not supported for directory buckets. RequestCharged types.RequestCharged - // If present, specifies the ID of the Key Management Service (KMS) symmetric - // encryption customer managed key that was used for the object. + // If present, indicates the ID of the Key Management Service (KMS) symmetric + // encryption customer managed key that was used for the object. This functionality + // is not supported for directory buckets. SSEKMSKeyId *string // The server-side encryption algorithm used when storing this object in Amazon S3 - // (for example, AES256 , aws:kms ). + // (for example, AES256 , aws:kms ). For directory buckets, only server-side + // encryption with Amazon S3 managed keys (SSE-S3) ( AES256 ) is supported. ServerSideEncryption types.ServerSideEncryption // Version ID of the newly created object, in case the bucket has versioning - // turned on. + // turned on. This functionality is not supported for directory buckets. VersionId *string // Metadata pertaining to the operation's result. 
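Tying the CompleteMultipartUpload input and output documented above together: the caller supplies the recorded part numbers and ETags, and reads the new object's location from the result. A hedged sketch that assumes the parts were uploaded and recorded elsewhere:

package example

import (
    "context"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/s3"
    "github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// completeUpload assembles previously uploaded parts. The parts list must be
// complete and in ascending order by part number, per the documentation above.
func completeUpload(ctx context.Context, client *s3.Client, bucket, key, uploadID string, parts []types.CompletedPart) (string, error) {
    out, err := client.CompleteMultipartUpload(ctx, &s3.CompleteMultipartUploadInput{
        Bucket:   aws.String(bucket),
        Key:      aws.String(key),
        UploadId: aws.String(uploadID),
        MultipartUpload: &types.CompletedMultipartUpload{
            Parts: parts,
        },
    })
    if err != nil {
        return "", err
    }
    // Location is the URI of the newly created object.
    return aws.ToString(out.Location), nil
}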
@@ -336,6 +376,9 @@ func (c *Client) addOperationCompleteMultipartUploadMiddlewares(stack *middlewar if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = addOpCompleteMultipartUploadValidationMiddleware(stack); err != nil { return err } diff --git a/service/s3/api_op_CopyObject.go b/service/s3/api_op_CopyObject.go index 83923f8d7bc..deb21cee051 100644 --- a/service/s3/api_op_CopyObject.go +++ b/service/s3/api_op_CopyObject.go @@ -10,6 +10,7 @@ import ( s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" "time" ) @@ -20,135 +21,89 @@ import ( // object greater than 5 GB, you must use the multipart upload Upload Part - Copy // (UploadPartCopy) API. For more information, see Copy Object Using the REST // Multipart Upload API (https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjctsUsingRESTMPUapi.html) -// . All copy requests must be authenticated. Additionally, you must have read -// access to the source object and write access to the destination bucket. For more -// information, see REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html) -// . Both the Region that you want to copy the object from and the Region that you -// want to copy the object to must be enabled for your account. A copy request -// might return an error when Amazon S3 receives the copy request or while Amazon -// S3 is copying the files. If the error occurs before the copy action starts, you -// receive a standard Amazon S3 error. If the error occurs during the copy -// operation, the error response is embedded in the 200 OK response. This means -// that a 200 OK response can contain either a success or an error. If you call -// the S3 API directly, make sure to design your application to parse the contents -// of the response and handle it appropriately. If you use Amazon Web Services -// SDKs, SDKs handle this condition. The SDKs detect the embedded error and apply -// error handling per your configuration settings (including automatically retrying -// the request as appropriate). If the condition persists, the SDKs throws an -// exception (or, for the SDKs that don't use exceptions, they return the error). -// If the copy is successful, you receive a response with information about the -// copied object. If the request is an HTTP 1.1 request, the response is chunk -// encoded. If it were not, it would not contain the content-length, and you would -// need to read the entire body. The copy request charge is based on the storage -// class and Region that you specify for the destination object. The request can -// also result in a data retrieval charge for the source if the source storage -// class bills for data retrieval. For pricing information, see Amazon S3 pricing (http://aws.amazon.com/s3/pricing/) -// . Amazon S3 transfer acceleration does not support cross-Region copies. If you -// request a cross-Region copy using a transfer acceleration endpoint, you get a -// 400 Bad Request error. For more information, see Transfer Acceleration (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html) -// . Metadata When copying an object, you can preserve all metadata (the default) -// or specify new metadata. 
However, the access control list (ACL) is not preserved -// and is set to private for the user making the request. To override the default -// ACL setting, specify a new ACL when generating a copy request. For more -// information, see Using ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html) -// . To specify whether you want the object metadata copied from the source object -// or replaced with metadata provided in the request, you can optionally add the -// x-amz-metadata-directive header. When you grant permissions, you can use the -// s3:x-amz-metadata-directive condition key to enforce certain metadata behavior -// when objects are uploaded. For more information, see Specifying Conditions in a -// Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/amazon-s3-policy-keys.html) -// in the Amazon S3 User Guide. For a complete list of Amazon S3-specific condition -// keys, see Actions, Resources, and Condition Keys for Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/list_amazons3.html) -// . x-amz-website-redirect-location is unique to each object and must be -// specified in the request headers to copy the value. x-amz-copy-source-if Headers -// To only copy an object under certain conditions, such as whether the Etag -// matches or whether the object was modified before or after a specified date, use -// the following request parameters: -// - x-amz-copy-source-if-match -// - x-amz-copy-source-if-none-match -// - x-amz-copy-source-if-unmodified-since -// - x-amz-copy-source-if-modified-since +// . You can copy individual objects between general purpose buckets, between +// directory buckets, and between general purpose buckets and directory buckets. +// Directory buckets - For directory buckets, you must make requests for this API +// operation to the Zonal endpoint. These endpoints support virtual-hosted-style +// requests in the format +// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style +// requests are not supported. For more information, see Regional and Zonal +// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. Both the Region that you want to copy the object +// from and the Region that you want to copy the object to must be enabled for your +// account. Amazon S3 transfer acceleration does not support cross-Region copies. +// If you request a cross-Region copy using a transfer acceleration endpoint, you +// get a 400 Bad Request error. For more information, see Transfer Acceleration (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html) +// . Authentication and authorization All CopyObject requests must be +// authenticated and signed by using IAM credentials (access key ID and secret +// access key for the IAM identities). All headers with the x-amz- prefix, +// including x-amz-copy-source , must be signed. For more information, see REST +// Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html) +// . Directory buckets - You must use the IAM credentials to authenticate and +// authorize your access to the CopyObject API operation, instead of using the +// temporary security credentials through the CreateSession API operation. Amazon +// Web Services CLI or SDKs handles authentication and authorization on your +// behalf. Permissions You must have read access to the source object and write +// access to the destination bucket. 
+// - General purpose bucket permissions - You must have permissions in an IAM +// policy based on the source and destination bucket types in a CopyObject +// operation. +// - If the source object is in a general purpose bucket, you must have +// s3:GetObject permission to read the source object that is being copied. +// - If the destination bucket is a general purpose bucket, you must have +// s3:PutObject permission to write the object copy to the destination bucket. +// - Directory bucket permissions - You must have permissions in a bucket policy +// or an IAM identity-based policy based on the source and destination bucket types +// in a CopyObject operation. +// - If the source object that you want to copy is in a directory bucket, you +// must have the s3express:CreateSession permission in the Action element of a +// policy to read the object. By default, the session is in the ReadWrite mode. +// If you want to restrict the access, you can explicitly set the +// s3express:SessionMode condition key to ReadOnly on the copy source bucket. +// - If the copy destination is a directory bucket, you must have the +// s3express:CreateSession permission in the Action element of a policy to write +// the object to the destination. The s3express:SessionMode condition key can't +// be set to ReadOnly on the copy destination bucket. For example policies, see +// Example bucket policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html) +// and Amazon Web Services Identity and Access Management (IAM) identity-based +// policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html) +// in the Amazon S3 User Guide. // -// If both the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since -// headers are present in the request and evaluate as follows, Amazon S3 returns -// 200 OK and copies the data: -// - x-amz-copy-source-if-match condition evaluates to true -// - x-amz-copy-source-if-unmodified-since condition evaluates to false +// Response and special errors When the request is an HTTP 1.1 request, the +// response is chunk encoded. When the request is not an HTTP 1.1 request, the +// response would not contain the Content-Length . You always need to read the +// entire response body to check if the copy succeeds; the response is streamed +// this way to keep the connection alive while Amazon S3 copies the data. +// - If the copy is successful, you receive a response with information about +// the copied object. +// - A copy request might return an error when Amazon S3 receives the copy +// request or while Amazon S3 is copying the files. A 200 OK response can contain +// either a success or an error. +// - If the error occurs before the copy action starts, you receive a standard +// Amazon S3 error. +// - If the error occurs during the copy operation, the error response is +// embedded in the 200 OK response. For example, in a cross-Region copy, you may +// encounter throttling and receive a 200 OK response. For more information, see +// Resolve the Error 200 response when copying objects to Amazon S3 . The 200 OK +// status code means the copy was accepted, but it doesn't mean the copy is +// complete. Another example is when you disconnect from Amazon S3 before the copy +// is complete, Amazon S3 might cancel the copy and you may receive a 200 OK +// response. You must stay connected to Amazon S3 until the entire response is +// successfully received and processed. 
If you call this API operation directly, +// make sure to design your application to parse the content of the response and +// handle it appropriately. If you use Amazon Web Services SDKs, SDKs handle this +// condition. The SDKs detect the embedded error and apply error handling per your +// configuration settings (including automatically retrying the request as +// appropriate). If the condition persists, the SDKs throw an exception (or, for +// the SDKs that don't use exceptions, they return an error). // -// If both the x-amz-copy-source-if-none-match and -// x-amz-copy-source-if-modified-since headers are present in the request and -// evaluate as follows, Amazon S3 returns the 412 Precondition Failed response -// code: -// - x-amz-copy-source-if-none-match condition evaluates to false -// - x-amz-copy-source-if-modified-since condition evaluates to true -// -// All headers with the x-amz- prefix, including x-amz-copy-source , must be -// signed. Server-side encryption Amazon S3 automatically encrypts all new objects -// that are copied to an S3 bucket. When copying an object, if you don't specify -// encryption information in your copy request, the encryption setting of the -// target object is set to the default encryption configuration of the destination -// bucket. By default, all buckets have a base level of encryption configuration -// that uses server-side encryption with Amazon S3 managed keys (SSE-S3). If the -// destination bucket has a default encryption configuration that uses server-side -// encryption with Key Management Service (KMS) keys (SSE-KMS), dual-layer -// server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), or -// server-side encryption with customer-provided encryption keys (SSE-C), Amazon S3 -// uses the corresponding KMS key, or a customer-provided key to encrypt the target -// object copy. When you perform a CopyObject operation, if you want to use a -// different type of encryption setting for the target object, you can use other -// appropriate encryption-related headers to encrypt the target object with a KMS -// key, an Amazon S3 managed key, or a customer-provided key. With server-side -// encryption, Amazon S3 encrypts your data as it writes your data to disks in its -// data centers and decrypts the data when you access it. If the encryption setting -// in your request is different from the default encryption configuration of the -// destination bucket, the encryption setting in your request takes precedence. If -// the source object for the copy is stored in Amazon S3 using SSE-C, you must -// provide the necessary encryption information in your request so that Amazon S3 -// can decrypt the object for copying. For more information about server-side -// encryption, see Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html) -// . If a target object uses SSE-KMS, you can enable an S3 Bucket Key for the -// object. For more information, see Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) -// in the Amazon S3 User Guide. Access Control List (ACL)-Specific Request Headers -// When copying an object, you can optionally use headers to grant ACL-based -// permissions. By default, all objects are private. Only the owner has full access -// control. When adding a new object, you can grant permissions to individual -// Amazon Web Services accounts or to predefined groups that are defined by Amazon -// S3. 
These permissions are then added to the ACL on the object. For more -// information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) -// and Managing ACLs Using the REST API (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html) -// . If the bucket that you're copying objects to uses the bucket owner enforced -// setting for S3 Object Ownership, ACLs are disabled and no longer affect -// permissions. Buckets that use this setting only accept PUT requests that don't -// specify an ACL or PUT requests that specify bucket owner full control ACLs, -// such as the bucket-owner-full-control canned ACL or an equivalent form of this -// ACL expressed in the XML format. For more information, see Controlling -// ownership of objects and disabling ACLs (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) -// in the Amazon S3 User Guide. If your bucket uses the bucket owner enforced -// setting for Object Ownership, all objects written to the bucket by any account -// will be owned by the bucket owner. Checksums When copying an object, if it has a -// checksum, that checksum will be copied to the new object by default. When you -// copy the object over, you can optionally specify a different checksum algorithm -// to use with the x-amz-checksum-algorithm header. Storage Class Options You can -// use the CopyObject action to change the storage class of an object that is -// already stored in Amazon S3 by using the StorageClass parameter. For more -// information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) -// in the Amazon S3 User Guide. If the source object's storage class is GLACIER or -// DEEP_ARCHIVE, or the object's storage class is INTELLIGENT_TIERING and it's S3 -// Intelligent-Tiering access tier (https://docs.aws.amazon.com/AmazonS3/latest/userguide/intelligent-tiering-overview.html#intel-tiering-tier-definition) -// is Archive Access or Deep Archive Access, you must restore a copy of this object -// before you can use it as a source object for the copy operation. For more -// information, see RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html) -// . For more information, see Copying Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjectsExamples.html) -// . Versioning By default, x-amz-copy-source header identifies the current -// version of an object to copy. If the current version is a delete marker, Amazon -// S3 behaves as if the object was deleted. To copy a different version, use the -// versionId subresource. If you enable versioning on the target bucket, Amazon S3 -// generates a unique version ID for the object being copied. This version ID is -// different from the version ID of the source object. Amazon S3 returns the -// version ID of the copied object in the x-amz-version-id response header in the -// response. If you do not enable versioning or suspend it on the target bucket, -// the version ID that Amazon S3 generates is always null. The following operations -// are related to CopyObject : +// Charge The copy request charge is based on the storage class and Region that +// you specify for the destination object. The request can also result in a data +// retrieval charge for the source if the source storage class bills for data +// retrieval. For pricing information, see Amazon S3 pricing (http://aws.amazon.com/s3/pricing/) +// . 
HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket_name.s3express-az_id.region.amazonaws.com . The following operations are +// related to CopyObject : // - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) // - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) func (c *Client) CopyObject(ctx context.Context, params *CopyObjectInput, optFns ...func(*Options)) (*CopyObjectOutput, error) { @@ -168,16 +123,26 @@ func (c *Client) CopyObject(ctx context.Context, params *CopyObjectInput, optFns type CopyObjectInput struct { - // The name of the destination bucket. When using this action with an access - // point, you must direct requests to the access point hostname. The access point - // hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. When you use this action with Amazon S3 on - // Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on - // Outposts hostname takes the form + // The name of the destination bucket. Directory buckets - When you use this + // operation with a directory bucket, you must use virtual-hosted-style requests in + // the format Bucket_name.s3express-az_id.region.amazonaws.com . Path-style + // requests are not supported. Directory bucket names must be unique in the chosen + // Availability Zone. Bucket names must follow the format + // bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 + // ). For information about bucket naming restrictions, see Directory bucket + // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. Access points - When you use this action with an + // access point, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When using the access point ARN, + // you must direct requests to the access point hostname. The access point hostname + // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. Access points and Object Lambda access points are + // not supported by directory buckets. S3 on Outposts - When you use this action + // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts + // hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you // use this action with S3 on Outposts through the Amazon Web Services SDKs, you // provide the Outposts access point ARN in place of the bucket name. For more @@ -187,35 +152,52 @@ type CopyObjectInput struct { // This member is required. Bucket *string - // Specifies the source object for the copy operation. 
You specify the value in - // one of two formats, depending on whether you want to access the source object - // through an access point (https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points.html) + // Specifies the source object for the copy operation. The source object can be up + // to 5 GB. If the source object is an object that was uploaded by using a + // multipart upload, the object copy will be a single part object after the source + // object is copied to the destination bucket. You specify the value of the copy + // source in one of two formats, depending on whether you want to access the source + // object through an access point (https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points.html) // : // - For objects not accessed through an access point, specify the name of the // source bucket and the key of the source object, separated by a slash (/). For - // example, to copy the object reports/january.pdf from the bucket - // awsexamplebucket , use awsexamplebucket/reports/january.pdf . The value must - // be URL-encoded. + // example, to copy the object reports/january.pdf from the general purpose + // bucket awsexamplebucket , use awsexamplebucket/reports/january.pdf . The value + // must be URL-encoded. To copy the object reports/january.pdf from the directory + // bucket awsexamplebucket--use1-az5--x-s3 , use + // awsexamplebucket--use1-az5--x-s3/reports/january.pdf . The value must be + // URL-encoded. // - For objects accessed through access points, specify the Amazon Resource // Name (ARN) of the object as accessed through the access point, in the format // arn:aws:s3:::accesspoint//object/ . For example, to copy the object // reports/january.pdf through access point my-access-point owned by account // 123456789012 in Region us-west-2 , use the URL encoding of // arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf - // . The value must be URL encoded. Amazon S3 supports copy operations using access - // points only when the source and destination buckets are in the same Amazon Web - // Services Region. Alternatively, for objects accessed through Amazon S3 on - // Outposts, specify the ARN of the object as accessed in the format - // arn:aws:s3-outposts:::outpost//object/ . For example, to copy the object - // reports/january.pdf through outpost my-outpost owned by account 123456789012 - // in Region us-west-2 , use the URL encoding of + // . The value must be URL encoded. + // - Amazon S3 supports copy operations using Access points only when the source + // and destination buckets are in the same Amazon Web Services Region. + // - Access points are not supported by directory buckets. Alternatively, for + // objects accessed through Amazon S3 on Outposts, specify the ARN of the object as + // accessed in the format arn:aws:s3-outposts:::outpost//object/ . For example, + // to copy the object reports/january.pdf through outpost my-outpost owned by + // account 123456789012 in Region us-west-2 , use the URL encoding of // arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf // . The value must be URL-encoded. - // To copy a specific version of an object, append ?versionId= to the value (for - // example, + // If your source bucket versioning is enabled, the x-amz-copy-source header by + // default identifies the current version of an object to copy. If the current + // version is a delete marker, Amazon S3 behaves as if the object was deleted. 
To + // copy a different version, use the versionId query parameter. Specifically, + // append ?versionId= to the value (for example, // awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893 // ). If you don't specify a version ID, Amazon S3 copies the latest version of the - // source object. + // source object. If you enable versioning on the destination bucket, Amazon S3 + // generates a unique version ID for the copied object. This version ID is + // different from the version ID of the source object. Amazon S3 returns the + // version ID of the copied object in the x-amz-version-id response header in the + // response. If you do not enable versioning or suspend it on the destination + // bucket, the version ID that Amazon S3 generates in the x-amz-version-id + // response header is always null. Directory buckets - S3 Versioning isn't enabled + // and supported for directory buckets. // // This member is required. CopySource *string @@ -225,139 +207,230 @@ type CopyObjectInput struct { // This member is required. Key *string - // The canned ACL to apply to the object. This action is not supported by Amazon - // S3 on Outposts. + // The canned access control list (ACL) to apply to the object. When you copy an + // object, the ACL metadata is not preserved and is set to private by default. + // Only the owner has full access control. To override the default ACL setting, + // specify a new ACL when you generate a copy request. For more information, see + // Using ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html) + // . If the destination bucket that you're copying objects to uses the bucket owner + // enforced setting for S3 Object Ownership, ACLs are disabled and no longer affect + // permissions. Buckets that use this setting only accept PUT requests that don't + // specify an ACL or PUT requests that specify bucket owner full control ACLs, + // such as the bucket-owner-full-control canned ACL or an equivalent form of this + // ACL expressed in the XML format. For more information, see Controlling + // ownership of objects and disabling ACLs (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) + // in the Amazon S3 User Guide. + // - If your destination bucket uses the bucket owner enforced setting for + // Object Ownership, all objects written to the bucket by any account will be owned + // by the bucket owner. + // - This functionality is not supported for directory buckets. + // - This functionality is not supported for Amazon S3 on Outposts. ACL types.ObjectCannedACL // Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption // with server-side encryption using Key Management Service (KMS) keys (SSE-KMS). + // If a target object uses SSE-KMS, you can enable an S3 Bucket Key for the object. // Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object // encryption with SSE-KMS. Specifying this header with a COPY action doesn’t - // affect bucket-level settings for S3 Bucket Key. + // affect bucket-level settings for S3 Bucket Key. For more information, see + // Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) + // in the Amazon S3 User Guide. This functionality is not supported when the + // destination bucket is a directory bucket. BucketKeyEnabled *bool - // Specifies caching behavior along the request/reply chain. + // Specifies the caching behavior along the request/reply chain. 
CacheControl *string - // Indicates the algorithm you want Amazon S3 to use to create the checksum for - // the object. For more information, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. + // Indicates the algorithm that you want Amazon S3 to use to create the checksum + // for the object. For more information, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. When you copy an object, if the source object has a + // checksum, that checksum value will be copied to the new object by default. If + // the CopyObject request does not include this x-amz-checksum-algorithm header, + // the checksum algorithm will be copied from the source object to the destination + // object (if it's present on the source object). You can optionally specify a + // different checksum algorithm to use with the x-amz-checksum-algorithm header. + // Unrecognized or unsupported values will respond with the HTTP status code 400 + // Bad Request . For directory buckets, when you use Amazon Web Services SDKs, + // CRC32 is the default checksum algorithm that's used for performance. ChecksumAlgorithm types.ChecksumAlgorithm - // Specifies presentational information for the object. + // Specifies presentational information for the object. Indicates whether an + // object should be displayed in a web browser or downloaded as a file. It allows + // specifying the desired filename for the downloaded file. ContentDisposition *string // Specifies what content encodings have been applied to the object and thus what // decoding mechanisms must be applied to obtain the media-type referenced by the - // Content-Type header field. + // Content-Type header field. For directory buckets, only the aws-chunked value is + // supported in this header field. ContentEncoding *string // The language the content is in. ContentLanguage *string - // A standard MIME type describing the format of the object data. + // A standard MIME type that describes the format of the object data. ContentType *string - // Copies the object if its entity tag (ETag) matches the specified tag. + // Copies the object if its entity tag (ETag) matches the specified tag. If both + // the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since + // headers are present in the request and evaluate as follows, Amazon S3 returns + // 200 OK and copies the data: + // - x-amz-copy-source-if-match condition evaluates to true + // - x-amz-copy-source-if-unmodified-since condition evaluates to false CopySourceIfMatch *string - // Copies the object if it has been modified since the specified time. + // Copies the object if it has been modified since the specified time. If both the + // x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since headers + // are present in the request and evaluate as follows, Amazon S3 returns the 412 + // Precondition Failed response code: + // - x-amz-copy-source-if-none-match condition evaluates to false + // - x-amz-copy-source-if-modified-since condition evaluates to true CopySourceIfModifiedSince *time.Time - // Copies the object if its entity tag (ETag) is different than the specified ETag. + // Copies the object if its entity tag (ETag) is different than the specified + // ETag. 
If both the x-amz-copy-source-if-none-match and + // x-amz-copy-source-if-modified-since headers are present in the request and + // evaluate as follows, Amazon S3 returns the 412 Precondition Failed response + // code: + // - x-amz-copy-source-if-none-match condition evaluates to false + // - x-amz-copy-source-if-modified-since condition evaluates to true CopySourceIfNoneMatch *string - // Copies the object if it hasn't been modified since the specified time. + // Copies the object if it hasn't been modified since the specified time. If both + // the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since + // headers are present in the request and evaluate as follows, Amazon S3 returns + // 200 OK and copies the data: + // - x-amz-copy-source-if-match condition evaluates to true + // - x-amz-copy-source-if-unmodified-since condition evaluates to false CopySourceIfUnmodifiedSince *time.Time // Specifies the algorithm to use when decrypting the source object (for example, - // AES256). + // AES256 ). If the source object for the copy is stored in Amazon S3 using SSE-C, + // you must provide the necessary encryption information in your request so that + // Amazon S3 can decrypt the object for copying. This functionality is not + // supported when the source object is in a directory bucket. CopySourceSSECustomerAlgorithm *string // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt - // the source object. The encryption key provided in this header must be one that - // was used when the source object was created. + // the source object. The encryption key provided in this header must be the same + // one that was used when the source object was created. If the source object for + // the copy is stored in Amazon S3 using SSE-C, you must provide the necessary + // encryption information in your request so that Amazon S3 can decrypt the object + // for copying. This functionality is not supported when the source object is in a + // directory bucket. CopySourceSSECustomerKey *string // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure that the - // encryption key was transmitted without error. + // encryption key was transmitted without error. If the source object for the copy + // is stored in Amazon S3 using SSE-C, you must provide the necessary encryption + // information in your request so that Amazon S3 can decrypt the object for + // copying. This functionality is not supported when the source object is in a + // directory bucket. CopySourceSSECustomerKeyMD5 *string - // The account ID of the expected destination bucket owner. If the destination - // bucket is owned by a different account, the request fails with the HTTP status - // code 403 Forbidden (access denied). + // The account ID of the expected destination bucket owner. If the account ID that + // you provide does not match the actual owner of the destination bucket, the + // request fails with the HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string - // The account ID of the expected source bucket owner. If the source bucket is - // owned by a different account, the request fails with the HTTP status code 403 - // Forbidden (access denied). + // The account ID of the expected source bucket owner. If the account ID that you + // provide does not match the actual owner of the source bucket, the request fails + // with the HTTP status code 403 Forbidden (access denied). 
ExpectedSourceBucketOwner *string // The date and time at which the object is no longer cacheable. Expires *time.Time - // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. This - // action is not supported by Amazon S3 on Outposts. + // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. + // - This functionality is not supported for directory buckets. + // - This functionality is not supported for Amazon S3 on Outposts. GrantFullControl *string - // Allows grantee to read the object data and its metadata. This action is not - // supported by Amazon S3 on Outposts. + // Allows grantee to read the object data and its metadata. + // - This functionality is not supported for directory buckets. + // - This functionality is not supported for Amazon S3 on Outposts. GrantRead *string - // Allows grantee to read the object ACL. This action is not supported by Amazon - // S3 on Outposts. + // Allows grantee to read the object ACL. + // - This functionality is not supported for directory buckets. + // - This functionality is not supported for Amazon S3 on Outposts. GrantReadACP *string - // Allows grantee to write the ACL for the applicable object. This action is not - // supported by Amazon S3 on Outposts. + // Allows grantee to write the ACL for the applicable object. + // - This functionality is not supported for directory buckets. + // - This functionality is not supported for Amazon S3 on Outposts. GrantWriteACP *string // A map of metadata to store with the object in S3. Metadata map[string]string // Specifies whether the metadata is copied from the source object or replaced - // with metadata provided in the request. + // with metadata that's provided in the request. When copying an object, you can + // preserve all metadata (the default) or specify new metadata. If this header + // isn’t specified, COPY is the default behavior. General purpose bucket - For + // general purpose buckets, when you grant permissions, you can use the + // s3:x-amz-metadata-directive condition key to enforce certain metadata behavior + // when objects are uploaded. For more information, see Amazon S3 condition key + // examples (https://docs.aws.amazon.com/AmazonS3/latest/dev/amazon-s3-policy-keys.html) + // in the Amazon S3 User Guide. x-amz-website-redirect-location is unique to each + // object and is not copied when using the x-amz-metadata-directive header. To + // copy the value, you must specify x-amz-website-redirect-location in the request + // header. MetadataDirective types.MetadataDirective - // Specifies whether you want to apply a legal hold to the copied object. + // Specifies whether you want to apply a legal hold to the object copy. This + // functionality is not supported for directory buckets. ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus - // The Object Lock mode that you want to apply to the copied object. + // The Object Lock mode that you want to apply to the object copy. This + // functionality is not supported for directory buckets. ObjectLockMode types.ObjectLockMode - // The date and time when you want the copied object's Object Lock to expire. + // The date and time when you want the Object Lock of the object copy to expire. + // This functionality is not supported for directory buckets. ObjectLockRetainUntilDate *time.Time // Confirms that the requester knows that they will be charged for the request. // Bucket owners need not specify this parameter in their requests. 
If either the - // source or destination Amazon S3 bucket has Requester Pays enabled, the requester - // will pay for corresponding charges to copy the object. For information about + // source or destination S3 bucket has Requester Pays enabled, the requester will + // pay for corresponding charges to copy the object. For information about // downloading objects from Requester Pays buckets, see Downloading Objects in // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. RequestPayer types.RequestPayer - // Specifies the algorithm to use to when encrypting the object (for example, - // AES256). + // Specifies the algorithm to use when encrypting the object (for example, AES256 + // ). When you perform a CopyObject operation, if you want to use a different type + // of encryption setting for the target object, you can specify appropriate + // encryption-related headers to encrypt the target object with an Amazon S3 + // managed key, a KMS key, or a customer-provided key. If the encryption setting in + // your request is different from the default encryption configuration of the + // destination bucket, the encryption setting in your request takes precedence. + // This functionality is not supported when the destination bucket is a directory + // bucket. SSECustomerAlgorithm *string // Specifies the customer-provided encryption key for Amazon S3 to use in // encrypting data. This value is used to store the object and then it is - // discarded; Amazon S3 does not store the encryption key. The key must be + // discarded. Amazon S3 does not store the encryption key. The key must be // appropriate for use with the algorithm specified in the - // x-amz-server-side-encryption-customer-algorithm header. + // x-amz-server-side-encryption-customer-algorithm header. This functionality is + // not supported when the destination bucket is a directory bucket. SSECustomerKey *string // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure that the - // encryption key was transmitted without error. + // encryption key was transmitted without error. This functionality is not + // supported when the destination bucket is a directory bucket. SSECustomerKeyMD5 *string // Specifies the Amazon Web Services KMS Encryption Context to use for object // encryption. The value of this header is a base64-encoded UTF-8 string holding // JSON with the encryption context key-value pairs. This value must be explicitly - // added to specify encryption context for CopyObject requests. + // added to specify encryption context for CopyObject requests. This functionality + // is not supported when the destination bucket is a directory bucket. SSEKMSEncryptionContext *string // Specifies the KMS ID (Key ID, Key ARN, or Key Alias) to use for object @@ -365,35 +438,132 @@ type CopyObjectInput struct { // they're not made via SSL or using SigV4. For information about configuring any // of the officially supported Amazon Web Services SDKs and Amazon Web Services // CLI, see Specifying the Signature Version in Request Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version) - // in the Amazon S3 User Guide. + // in the Amazon S3 User Guide. 
This functionality is not supported when the + // destination bucket is a directory bucket. SSEKMSKeyId *string // The server-side encryption algorithm used when storing this object in Amazon S3 - // (for example, AES256 , aws:kms , aws:kms:dsse ). + // (for example, AES256 , aws:kms , aws:kms:dsse ). Unrecognized or unsupported + // values won’t write a destination object and will receive a 400 Bad Request + // response. Amazon S3 automatically encrypts all new objects that are copied to an + // S3 bucket. When copying an object, if you don't specify encryption information + // in your copy request, the encryption setting of the target object is set to the + // default encryption configuration of the destination bucket. By default, all + // buckets have a base level of encryption configuration that uses server-side + // encryption with Amazon S3 managed keys (SSE-S3). If the destination bucket has a + // default encryption configuration that uses server-side encryption with Key + // Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption with + // Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with + // customer-provided encryption keys (SSE-C), Amazon S3 uses the corresponding KMS + // key, or a customer-provided key to encrypt the target object copy. When you + // perform a CopyObject operation, if you want to use a different type of + // encryption setting for the target object, you can specify appropriate + // encryption-related headers to encrypt the target object with an Amazon S3 + // managed key, a KMS key, or a customer-provided key. If the encryption setting in + // your request is different from the default encryption configuration of the + // destination bucket, the encryption setting in your request takes precedence. + // With server-side encryption, Amazon S3 encrypts your data as it writes your data + // to disks in its data centers and decrypts the data when you access it. For more + // information about server-side encryption, see Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html) + // in the Amazon S3 User Guide. For directory buckets, only server-side encryption + // with Amazon S3 managed keys (SSE-S3) ( AES256 ) is supported. ServerSideEncryption types.ServerSideEncryption // If the x-amz-storage-class header is not used, the copied object will be stored // in the STANDARD Storage Class by default. The STANDARD storage class provides // high durability and high availability. Depending on performance needs, you can - // specify a different Storage Class. Amazon S3 on Outposts only uses the OUTPOSTS - // Storage Class. For more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) + // specify a different Storage Class. + // - Directory buckets - For directory buckets, only the S3 Express One Zone + // storage class is supported to store newly created objects. Unsupported storage + // class values won't write a destination object and will respond with the HTTP + // status code 400 Bad Request . + // - Amazon S3 on Outposts - S3 on Outposts only uses the OUTPOSTS Storage Class. + // You can use the CopyObject action to change the storage class of an object that + // is already stored in Amazon S3 by using the x-amz-storage-class header. For + // more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) + // in the Amazon S3 User Guide. 
Before using an object as a source object for the + // copy operation, you must restore a copy of it if it meets any of the following + // conditions: + // - The storage class of the source object is GLACIER or DEEP_ARCHIVE . + // - The storage class of the source object is INTELLIGENT_TIERING and it's S3 + // Intelligent-Tiering access tier (https://docs.aws.amazon.com/AmazonS3/latest/userguide/intelligent-tiering-overview.html#intel-tiering-tier-definition) + // is Archive Access or Deep Archive Access . + // For more information, see RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html) + // and Copying Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjectsExamples.html) // in the Amazon S3 User Guide. StorageClass types.StorageClass - // The tag-set for the object destination object this value must be used in - // conjunction with the TaggingDirective . The tag-set must be encoded as URL Query - // parameters. + // The tag-set for the object copy in the destination bucket. This value must be + // used in conjunction with the x-amz-tagging-directive if you choose REPLACE for + // the x-amz-tagging-directive . If you choose COPY for the x-amz-tagging-directive + // , you don't need to set the x-amz-tagging header, because the tag-set will be + // copied from the source object directly. The tag-set must be encoded as URL Query + // parameters. The default value is the empty value. Directory buckets - For + // directory buckets in a CopyObject operation, only the empty tag-set is + // supported. Any requests that attempt to write non-empty tags into directory + // buckets will receive a 501 Not Implemented status code. When the destination + // bucket is a directory bucket, you will receive a 501 Not Implemented response + // in any of the following situations: + // - When you attempt to COPY the tag-set from an S3 source object that has + // non-empty tags. + // - When you attempt to REPLACE the tag-set of a source object and set a + // non-empty value to x-amz-tagging . + // - When you don't set the x-amz-tagging-directive header and the source object + // has non-empty tags. This is because the default value of + // x-amz-tagging-directive is COPY . + // Because only the empty tag-set is supported for directory buckets in a + // CopyObject operation, the following situations are allowed: + // - When you attempt to COPY the tag-set from a directory bucket source object + // that has no tags to a general purpose bucket. It copies an empty tag-set to the + // destination object. + // - When you attempt to REPLACE the tag-set of a directory bucket source object + // and set the x-amz-tagging value of the directory bucket destination object to + // empty. + // - When you attempt to REPLACE the tag-set of a general purpose bucket source + // object that has non-empty tags and set the x-amz-tagging value of the + // directory bucket destination object to empty. + // - When you attempt to REPLACE the tag-set of a directory bucket source object + // and don't set the x-amz-tagging value of the directory bucket destination + // object. This is because the default value of x-amz-tagging is the empty value. Tagging *string - // Specifies whether the object tag-set are copied from the source object or - // replaced with tag-set provided in the request. + // Specifies whether the object tag-set is copied from the source object or + // replaced with the tag-set that's provided in the request. The default value is + // COPY . 
Directory buckets - For directory buckets in a CopyObject operation, + // only the empty tag-set is supported. Any requests that attempt to write + // non-empty tags into directory buckets will receive a 501 Not Implemented status + // code. When the destination bucket is a directory bucket, you will receive a 501 + // Not Implemented response in any of the following situations: + // - When you attempt to COPY the tag-set from an S3 source object that has + // non-empty tags. + // - When you attempt to REPLACE the tag-set of a source object and set a + // non-empty value to x-amz-tagging . + // - When you don't set the x-amz-tagging-directive header and the source object + // has non-empty tags. This is because the default value of + // x-amz-tagging-directive is COPY . + // Because only the empty tag-set is supported for directory buckets in a + // CopyObject operation, the following situations are allowed: + // - When you attempt to COPY the tag-set from a directory bucket source object + // that has no tags to a general purpose bucket. It copies an empty tag-set to the + // destination object. + // - When you attempt to REPLACE the tag-set of a directory bucket source object + // and set the x-amz-tagging value of the directory bucket destination object to + // empty. + // - When you attempt to REPLACE the tag-set of a general purpose bucket source + // object that has non-empty tags and set the x-amz-tagging value of the + // directory bucket destination object to empty. + // - When you attempt to REPLACE the tag-set of a directory bucket source object + // and don't set the x-amz-tagging value of the directory bucket destination + // object. This is because the default value of x-amz-tagging is the empty value. TaggingDirective types.TaggingDirective - // If the bucket is configured as a website, redirects requests for this object to - // another object in the same bucket or to an external URL. Amazon S3 stores the - // value of this header in the object metadata. This value is unique to each object - // and is not copied when using the x-amz-metadata-directive header. Instead, you - // may opt to provide this header in combination with the directive. + // If the destination bucket is configured as a website, redirects requests for + // this object copy to another object in the same bucket or to an external URL. + // Amazon S3 stores the value of this header in the object metadata. This value is + // unique to each object and is not copied when using the x-amz-metadata-directive + // header. Instead, you may opt to provide this header in combination with the + // x-amz-metadata-directive header. This functionality is not supported for + // directory buckets. WebsiteRedirectLocation *string noSmithyDocumentSerde @@ -401,52 +571,62 @@ type CopyObjectInput struct { func (in *CopyObjectInput) bindEndpointParams(p *EndpointParameters) { p.Bucket = in.Bucket - + p.DisableS3ExpressSessionAuth = ptr.Bool(true) } type CopyObjectOutput struct { // Indicates whether the copied object uses an S3 Bucket Key for server-side - // encryption with Key Management Service (KMS) keys (SSE-KMS). + // encryption with Key Management Service (KMS) keys (SSE-KMS). This functionality + // is not supported for directory buckets. BucketKeyEnabled *bool // Container for all response elements. CopyObjectResult *types.CopyObjectResult - // Version of the copied object in the destination bucket. + // Version ID of the source object that was copied. 
This functionality is not + // supported when the source object is in a directory bucket. CopySourceVersionId *string - // If the object expiration is configured, the response includes this header. + // If the object expiration is configured, the response includes this header. This + // functionality is not supported for directory buckets. Expiration *string // If present, indicates that the requester was successfully charged for the - // request. + // request. This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // If server-side encryption with a customer-provided encryption key was - // requested, the response will include this header confirming the encryption - // algorithm used. + // requested, the response will include this header to confirm the encryption + // algorithm that's used. This functionality is not supported for directory + // buckets. SSECustomerAlgorithm *string // If server-side encryption with a customer-provided encryption key was - // requested, the response will include this header to provide round-trip message - // integrity verification of the customer-provided encryption key. + // requested, the response will include this header to provide the round-trip + // message integrity verification of the customer-provided encryption key. This + // functionality is not supported for directory buckets. SSECustomerKeyMD5 *string - // If present, specifies the Amazon Web Services KMS Encryption Context to use for + // If present, indicates the Amazon Web Services KMS Encryption Context to use for // object encryption. The value of this header is a base64-encoded UTF-8 string - // holding JSON with the encryption context key-value pairs. + // holding JSON with the encryption context key-value pairs. This functionality is + // not supported for directory buckets. SSEKMSEncryptionContext *string - // If present, specifies the ID of the Key Management Service (KMS) symmetric - // encryption customer managed key that was used for the object. + // If present, indicates the ID of the Key Management Service (KMS) symmetric + // encryption customer managed key that was used for the object. This functionality + // is not supported for directory buckets. SSEKMSKeyId *string - // The server-side encryption algorithm used when storing this object in Amazon S3 - // (for example, AES256 , aws:kms , aws:kms:dsse ). + // The server-side encryption algorithm used when you store this object in Amazon + // S3 (for example, AES256 , aws:kms , aws:kms:dsse ). For directory buckets, only + // server-side encryption with Amazon S3 managed keys (SSE-S3) ( AES256 ) is + // supported. ServerSideEncryption types.ServerSideEncryption - // Version ID of the newly created copy. + // Version ID of the newly created copy. This functionality is not supported for + // directory buckets. VersionId *string // Metadata pertaining to the operation's result. 
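To illustrate the CopyObjectInput and CopyObjectOutput fields documented above, here is a minimal Go sketch of a copy between two general purpose buckets. It is an illustration with assumptions, not part of the generated patch: the bucket and key names are placeholders, the CopySource value is URL-encoded in the slash-separated format described above, and error handling is minimal.

```go
package main

import (
	"context"
	"fmt"
	"log"
	"net/url"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// "source-bucket/key" is the slash-separated copy source; append
	// "?versionId=..." to copy a specific version from a versioned bucket.
	copySource := url.QueryEscape("DOC-EXAMPLE-SOURCE-BUCKET/reports/january.pdf")

	out, err := client.CopyObject(ctx, &s3.CopyObjectInput{
		Bucket:     aws.String("DOC-EXAMPLE-DESTINATION-BUCKET"),
		Key:        aws.String("reports/january-copy.pdf"),
		CopySource: aws.String(copySource),
	})
	if err != nil {
		// As described above, an error embedded in a 200 OK response is
		// surfaced by the SDK as a returned error.
		log.Fatal(err)
	}

	if out.CopyObjectResult != nil {
		fmt.Println("copied ETag:", aws.ToString(out.CopyObjectResult.ETag))
	}
	// VersionId is nil when the destination bucket is unversioned or a
	// directory bucket, per the field documentation above.
	fmt.Println("new version ID:", aws.ToString(out.VersionId))
}
```

The same call shape applies when the destination is a directory bucket, with the caveats documented above: versioning, non-empty tag-sets, ACL grants, and SSE-C/SSE-KMS headers are not supported there.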
@@ -510,6 +690,9 @@ func (c *Client) addOperationCopyObjectMiddlewares(stack *middleware.Stack, opti if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = addOpCopyObjectValidationMiddleware(stack); err != nil { return err } diff --git a/service/s3/api_op_CreateBucket.go b/service/s3/api_op_CreateBucket.go index 5fc78859857..6357444ba10 100644 --- a/service/s3/api_op_CreateBucket.go +++ b/service/s3/api_op_CreateBucket.go @@ -14,64 +14,80 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Creates a new S3 bucket. To create a bucket, you must register with Amazon S3 -// and have a valid Amazon Web Services Access Key ID to authenticate requests. +// This action creates an Amazon S3 bucket. To create an Amazon S3 on Outposts +// bucket, see CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateBucket.html) +// . Creates a new S3 bucket. To create a bucket, you must set up Amazon S3 and +// have a valid Amazon Web Services Access Key ID to authenticate requests. // Anonymous requests are never allowed to create buckets. By creating the bucket, -// you become the bucket owner. Not every string is an acceptable bucket name. For -// information about bucket naming restrictions, see Bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html) -// . If you want to create an Amazon S3 on Outposts bucket, see Create Bucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateBucket.html) -// . By default, the bucket is created in the US East (N. Virginia) Region. You can -// optionally specify a Region in the request body. To constrain the bucket -// creation to a specific Region, you can use LocationConstraint (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucketConfiguration.html) -// condition key. You might choose a Region to optimize latency, minimize costs, or -// address regulatory requirements. For example, if you reside in Europe, you will -// probably find it advantageous to create buckets in the Europe (Ireland) Region. -// For more information, see Accessing a bucket (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro) -// . If you send your create bucket request to the s3.amazonaws.com endpoint, the -// request goes to the us-east-1 Region. Accordingly, the signature calculations -// in Signature Version 4 must use us-east-1 as the Region, even if the location -// constraint in the request specifies another Region where the bucket is to be -// created. If you create a bucket in a Region other than US East (N. Virginia), -// your application must be able to handle 307 redirect. For more information, see -// Virtual hosting of buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html) -// . Permissions In addition to s3:CreateBucket , the following permissions are -// required when your CreateBucket request includes specific headers: -// - Access control lists (ACLs) - If your CreateBucket request specifies access -// control list (ACL) permissions and the ACL is public-read, public-read-write, -// authenticated-read, or if you specify access permissions explicitly through any -// other ACL, both s3:CreateBucket and s3:PutBucketAcl permissions are needed. If -// the ACL for the CreateBucket request is private or if the request doesn't -// specify any ACLs, only s3:CreateBucket permission is needed. 
-// - Object Lock - If ObjectLockEnabledForBucket is set to true in your -// CreateBucket request, s3:PutBucketObjectLockConfiguration and -// s3:PutBucketVersioning permissions are required. +// you become the bucket owner. There are two types of buckets: general purpose +// buckets and directory buckets. For more information about these bucket types, +// see Creating, configuring, and working with Amazon S3 buckets (https://docs.aws.amazon.com/AmazonS3/latest/userguide/creating-buckets-s3.html) +// in the Amazon S3 User Guide. +// - General purpose buckets - If you send your CreateBucket request to the +// s3.amazonaws.com global endpoint, the request goes to the us-east-1 Region. So +// the signature calculations in Signature Version 4 must use us-east-1 as the +// Region, even if the location constraint in the request specifies another Region +// where the bucket is to be created. If you create a bucket in a Region other than +// US East (N. Virginia), your application must be able to handle 307 redirect. For +// more information, see Virtual hosting of buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html) +// in the Amazon S3 User Guide. +// - Directory buckets - For directory buckets, you must make requests for this +// API operation to the Regional endpoint. These endpoints support path-style +// requests in the format +// https://s3express-control.region_code.amazonaws.com/bucket-name . +// Virtual-hosted-style requests aren't supported. For more information, see +// Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. +// +// Permissions +// - General purpose bucket permissions - In addition to the s3:CreateBucket +// permission, the following permissions are required in a policy when your +// CreateBucket request includes specific headers: +// - Access control lists (ACLs) - In your CreateBucket request, if you specify +// an access control list (ACL) and set it to public-read , public-read-write , +// authenticated-read , or if you explicitly specify any other custom ACLs, both +// s3:CreateBucket and s3:PutBucketAcl permissions are required. In your +// CreateBucket request, if you set the ACL to private , or if you don't specify +// any ACLs, only the s3:CreateBucket permission is required. +// - Object Lock - In your CreateBucket request, if you set +// x-amz-bucket-object-lock-enabled to true, the +// s3:PutBucketObjectLockConfiguration and s3:PutBucketVersioning permissions are +// required. // - S3 Object Ownership - If your CreateBucket request includes the // x-amz-object-ownership header, then the s3:PutBucketOwnershipControls -// permission is required. By default, ObjectOwnership is set to -// BucketOWnerEnforced and ACLs are disabled. We recommend keeping ACLs disabled, -// except in uncommon use cases where you must control access for each object -// individually. If you want to change the ObjectOwnership setting, you can use -// the x-amz-object-ownership header in your CreateBucket request to set the -// ObjectOwnership setting of your choice. For more information about S3 Object -// Ownership, see Controlling object ownership (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) +// permission is required. 
If your CreateBucket request sets BucketOwnerEnforced +// for Amazon S3 Object Ownership and specifies a bucket ACL that provides access +// to an external Amazon Web Services account, your request fails with a 400 +// error and returns the InvalidBucketAcLWithObjectOwnership error code. For more +// information, see Setting Object Ownership on an existing bucket (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-ownership-existing-bucket.html) // in the Amazon S3 User Guide. // - S3 Block Public Access - If your specific use case requires granting public -// access to your S3 resources, you can disable Block Public Access. You can create -// a new bucket with Block Public Access enabled, then separately call the -// DeletePublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html) +// access to your S3 resources, you can disable Block Public Access. Specifically, +// you can create a new bucket with Block Public Access enabled, then separately +// call the DeletePublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html) // API. To use this operation, you must have the s3:PutBucketPublicAccessBlock -// permission. By default, all Block Public Access settings are enabled for new -// buckets. To avoid inadvertent exposure of your resources, we recommend keeping -// the S3 Block Public Access settings enabled. For more information about S3 Block -// Public Access, see Blocking public access to your Amazon S3 storage (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) +// permission. For more information about S3 Block Public Access, see Blocking +// public access to your Amazon S3 storage (https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-control-block-public-access.html) +// in the Amazon S3 User Guide. +// - Directory bucket permissions - You must have the s3express:CreateBucket +// permission in an IAM identity-based policy instead of a bucket policy. +// Cross-account access to this API operation isn't supported. This operation can +// only be performed by the Amazon Web Services account that owns the resource. For +// more information about directory bucket policies and permissions, see Amazon +// Web Services Identity and Access Management (IAM) for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html) +// in the Amazon S3 User Guide. The permissions for ACLs, Object Lock, S3 Object +// Ownership, and S3 Block Public Access are not supported for directory buckets. +// For directory buckets, all Block Public Access settings are enabled at the +// bucket level and S3 Object Ownership is set to Bucket owner enforced (ACLs +// disabled). These settings can't be modified. For more information about +// permissions for creating and working with directory buckets, see Directory +// buckets (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html) +// in the Amazon S3 User Guide. For more information about supported S3 features +// for directory buckets, see Features of S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-one-zone.html#s3-express-features) // in the Amazon S3 User Guide. 
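A minimal sketch (not part of the generated code in this patch) of calling CreateBucket with the v2 SDK, to accompany the permissions discussion above. The Region, bucket name, and LocationConstraint are placeholders; outside us-east-1 the target Region is supplied through CreateBucketConfiguration, and directory bucket requests are routed by the SDK to the s3express-control endpoint described above.

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO(), config.WithRegion("us-west-2"))
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Outside us-east-1, the target Region is passed as a LocationConstraint.
	_, err = client.CreateBucket(context.TODO(), &s3.CreateBucketInput{
		Bucket: aws.String("doc-example-bucket"), // placeholder bucket name
		CreateBucketConfiguration: &types.CreateBucketConfiguration{
			LocationConstraint: types.BucketLocationConstraintUsWest2,
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("bucket created")
}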
// -// If your CreateBucket request sets BucketOwnerEnforced for Amazon S3 Object -// Ownership and specifies a bucket ACL that provides access to an external Amazon -// Web Services account, your request fails with a 400 error and returns the -// InvalidBucketAcLWithObjectOwnership error code. For more information, see -// Setting Object Ownership on an existing bucket (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-ownership-existing-bucket.html) -// in the Amazon S3 User Guide. The following operations are related to +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// s3express-control.region.amazonaws.com . The following operations are related to // CreateBucket : // - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) // - DeleteBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html) @@ -92,36 +108,51 @@ func (c *Client) CreateBucket(ctx context.Context, params *CreateBucketInput, op type CreateBucketInput struct { - // The name of the bucket to create. + // The name of the bucket to create. General purpose buckets - For information + // about bucket naming restrictions, see Bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html) + // in the Amazon S3 User Guide. Directory buckets - When you use this operation + // with a directory bucket, you must use path-style requests in the format + // https://s3express-control.region_code.amazonaws.com/bucket-name . + // Virtual-hosted-style requests aren't supported. Directory bucket names must be + // unique in the chosen Availability Zone. Bucket names must also follow the format + // bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 + // ). For information about bucket naming restrictions, see Directory bucket + // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide // // This member is required. Bucket *string - // The canned ACL to apply to the bucket. + // The canned ACL to apply to the bucket. This functionality is not supported for + // directory buckets. ACL types.BucketCannedACL // The configuration information for the bucket. CreateBucketConfiguration *types.CreateBucketConfiguration // Allows grantee the read, write, read ACP, and write ACP permissions on the - // bucket. + // bucket. This functionality is not supported for directory buckets. GrantFullControl *string - // Allows grantee to list the objects in the bucket. + // Allows grantee to list the objects in the bucket. This functionality is not + // supported for directory buckets. GrantRead *string - // Allows grantee to read the bucket ACL. + // Allows grantee to read the bucket ACL. This functionality is not supported for + // directory buckets. GrantReadACP *string // Allows grantee to create new objects in the bucket. For the bucket and object // owners of existing objects, also allows deletions and overwrites of those - // objects. + // objects. This functionality is not supported for directory buckets. GrantWrite *string - // Allows grantee to write the ACL for the applicable bucket. + // Allows grantee to write the ACL for the applicable bucket. This functionality + // is not supported for directory buckets. GrantWriteACP *string // Specifies whether you want S3 Object Lock to be enabled for the new bucket. + // This functionality is not supported for directory buckets. 
ObjectLockEnabledForBucket *bool // The container element for object ownership for a bucket's ownership controls. @@ -132,9 +163,16 @@ type CreateBucketInput struct { // BucketOwnerEnforced - Access control lists (ACLs) are disabled and no longer // affect permissions. The bucket owner automatically owns and has full control // over every object in the bucket. The bucket only accepts PUT requests that don't - // specify an ACL or bucket owner full control ACLs, such as the - // bucket-owner-full-control canned ACL or an equivalent form of this ACL expressed - // in the XML format. + // specify an ACL or specify bucket owner full control ACLs (such as the predefined + // bucket-owner-full-control canned ACL or a custom ACL in XML format that grants + // the same permissions). By default, ObjectOwnership is set to BucketOwnerEnforced + // and ACLs are disabled. We recommend keeping ACLs disabled, except in uncommon + // use cases where you must control access for each object individually. For more + // information about S3 Object Ownership, see Controlling ownership of objects and + // disabling ACLs for your bucket (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. Directory buckets use the bucket owner enforced setting for S3 Object + // Ownership. ObjectOwnership types.ObjectOwnership noSmithyDocumentSerde @@ -142,6 +180,7 @@ type CreateBucketInput struct { func (in *CreateBucketInput) bindEndpointParams(p *EndpointParameters) { p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) p.DisableAccessPoints = ptr.Bool(true) } @@ -211,6 +250,9 @@ func (c *Client) addOperationCreateBucketMiddlewares(stack *middleware.Stack, op if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = addOpCreateBucketValidationMiddleware(stack); err != nil { return err } diff --git a/service/s3/api_op_CreateMultipartUpload.go b/service/s3/api_op_CreateMultipartUpload.go index 739a590b196..4f24e11c464 100644 --- a/service/s3/api_op_CreateMultipartUpload.go +++ b/service/s3/api_op_CreateMultipartUpload.go @@ -21,97 +21,111 @@ import ( // ). You also include this upload ID in the final request to either complete or // abort the multipart upload request. For more information about multipart // uploads, see Multipart Upload Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html) -// . If you have configured a lifecycle rule to abort incomplete multipart uploads, -// the upload must complete within the number of days specified in the bucket -// lifecycle configuration. Otherwise, the incomplete multipart upload becomes -// eligible for an abort action and Amazon S3 aborts the multipart upload. For more -// information, see Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle -// Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) -// . For information about the permissions required to use the multipart upload -// API, see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) -// . For request signing, multipart upload is just a series of regular requests. -// You initiate a multipart upload, send one or more requests to upload parts, and -// then complete the multipart upload process. 
You sign each request individually. -// There is nothing special about signing multipart upload requests. For more -// information about signing, see Authenticating Requests (Amazon Web Services -// Signature Version 4) (https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html) -// . After you initiate a multipart upload and upload one or more parts, to stop -// being charged for storing the uploaded parts, you must either complete or abort -// the multipart upload. Amazon S3 frees up the space used to store the parts and -// stop charging you for storing them only after you either complete or abort a -// multipart upload. Server-side encryption is for data encryption at rest. Amazon -// S3 encrypts your data as it writes it to disks in its data centers and decrypts -// it when you access it. Amazon S3 automatically encrypts all new objects that are -// uploaded to an S3 bucket. When doing a multipart upload, if you don't specify -// encryption information in your request, the encryption setting of the uploaded -// parts is set to the default encryption configuration of the destination bucket. -// By default, all buckets have a base level of encryption configuration that uses -// server-side encryption with Amazon S3 managed keys (SSE-S3). If the destination -// bucket has a default encryption configuration that uses server-side encryption -// with an Key Management Service (KMS) key (SSE-KMS), or a customer-provided -// encryption key (SSE-C), Amazon S3 uses the corresponding KMS key, or a -// customer-provided key to encrypt the uploaded parts. When you perform a -// CreateMultipartUpload operation, if you want to use a different type of -// encryption setting for the uploaded parts, you can request that Amazon S3 -// encrypts the object with a KMS key, an Amazon S3 managed key, or a -// customer-provided key. If the encryption setting in your request is different -// from the default encryption configuration of the destination bucket, the -// encryption setting in your request takes precedence. If you choose to provide -// your own encryption key, the request headers you provide in UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) -// and UploadPartCopy (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html) -// requests must match the headers you used in the request to initiate the upload -// by using CreateMultipartUpload . You can request that Amazon S3 save the -// uploaded parts encrypted with server-side encryption with an Amazon S3 managed -// key (SSE-S3), an Key Management Service (KMS) key (SSE-KMS), or a -// customer-provided encryption key (SSE-C). To perform a multipart upload with -// encryption by using an Amazon Web Services KMS key, the requester must have -// permission to the kms:Decrypt and kms:GenerateDataKey* actions on the key. -// These permissions are required because Amazon S3 must decrypt and read data from -// the encrypted file parts before it completes the multipart upload. For more -// information, see Multipart upload API and permissions (https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html#mpuAndPermissions) -// and Protecting data using server-side encryption with Amazon Web Services KMS (https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html) -// in the Amazon S3 User Guide. 
If your Identity and Access Management (IAM) user -// or role is in the same Amazon Web Services account as the KMS key, then you must -// have these permissions on the key policy. If your IAM user or role belongs to a -// different account than the key, then you must have the permissions on both the -// key policy and your IAM user or role. For more information, see Protecting Data -// Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html) -// . Access Permissions When copying an object, you can optionally specify the -// accounts or groups that should be granted specific permissions on the new -// object. There are two ways to grant the permissions using the request headers: -// - Specify a canned ACL with the x-amz-acl request header. For more -// information, see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL) -// . -// - Specify access permissions explicitly with the x-amz-grant-read , -// x-amz-grant-read-acp , x-amz-grant-write-acp , and x-amz-grant-full-control -// headers. These parameters map to the set of permissions that Amazon S3 supports -// in an ACL. For more information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) +// in the Amazon S3 User Guide. After you initiate a multipart upload and upload +// one or more parts, to stop being charged for storing the uploaded parts, you +// must either complete or abort the multipart upload. Amazon S3 frees up the space +// used to store the parts and stops charging you for storing them only after you +// either complete or abort a multipart upload. If you have configured a lifecycle +// rule to abort incomplete multipart uploads, the created multipart upload must be +// completed within the number of days specified in the bucket lifecycle +// configuration. Otherwise, the incomplete multipart upload becomes eligible for +// an abort action and Amazon S3 aborts the multipart upload. For more information, +// see Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) +// . +// - Directory buckets - S3 Lifecycle is not supported by directory buckets. +// - Directory buckets - For directory buckets, you must make requests for this +// API operation to the Zonal endpoint. These endpoints support +// virtual-hosted-style requests in the format +// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . +// Path-style requests are not supported. For more information, see Regional and +// Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. +// +// Request signing For request signing, multipart upload is just a series of +// regular requests. You initiate a multipart upload, send one or more requests to +// upload parts, and then complete the multipart upload process. You sign each +// request individually. There is nothing special about signing multipart upload +// requests. For more information about signing, see Authenticating Requests +// (Amazon Web Services Signature Version 4) (https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html) +// in the Amazon S3 User Guide. 
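A sketch (not part of the generated code in this patch) of the initiate, upload-parts, complete sequence described above, using the v2 SDK. The bucket, key, and single part body are placeholders; real uploads use parts of at least 5 MiB (except the last part) and usually upload them concurrently. Note that recent SDK releases model PartNumber as *int32 (set with aws.Int32), while older releases take a plain int32.

package s3examples

import (
	"context"
	"io"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// multipartUploadSketch initiates a multipart upload, uploads one part, and
// completes the upload; on a part failure it aborts the upload so the stored
// parts stop accruing charges.
func multipartUploadSketch(ctx context.Context, client *s3.Client, bucket, key string, body io.Reader) error {
	create, err := client.CreateMultipartUpload(ctx, &s3.CreateMultipartUploadInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
	})
	if err != nil {
		return err
	}

	// Each part is signed as an ordinary request, as noted above.
	part, err := client.UploadPart(ctx, &s3.UploadPartInput{
		Bucket:     aws.String(bucket),
		Key:        aws.String(key),
		UploadId:   create.UploadId,
		PartNumber: aws.Int32(1),
		Body:       body,
	})
	if err != nil {
		_, _ = client.AbortMultipartUpload(ctx, &s3.AbortMultipartUploadInput{
			Bucket:   aws.String(bucket),
			Key:      aws.String(key),
			UploadId: create.UploadId,
		})
		return err
	}

	_, err = client.CompleteMultipartUpload(ctx, &s3.CompleteMultipartUploadInput{
		Bucket:   aws.String(bucket),
		Key:      aws.String(key),
		UploadId: create.UploadId,
		MultipartUpload: &types.CompletedMultipartUpload{
			Parts: []types.CompletedPart{{ETag: part.ETag, PartNumber: aws.Int32(1)}},
		},
	})
	return err
}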
Permissions +// - General purpose bucket permissions - For information about the permissions +// required to use the multipart upload API, see Multipart upload and permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) +// in the Amazon S3 User Guide. To perform a multipart upload with encryption by +// using an Amazon Web Services KMS key, the requester must have permission to the +// kms:Decrypt and kms:GenerateDataKey* actions on the key. These permissions are +// required because Amazon S3 must decrypt and read data from the encrypted file +// parts before it completes the multipart upload. For more information, see +// Multipart upload API and permissions (https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html#mpuAndPermissions) +// and Protecting data using server-side encryption with Amazon Web Services KMS (https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html) +// in the Amazon S3 User Guide. +// - Directory bucket permissions - To grant access to this API operation on a +// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// API operation for session-based authorization. Specifically, you grant the +// s3express:CreateSession permission to the directory bucket in a bucket policy +// or an IAM identity-based policy. Then, you make the CreateSession API call on +// the bucket to obtain a session token. With the session token in your request +// header, you can make API requests to this operation. After the session token +// expires, you make another CreateSession API call to generate a new session +// token for use. Amazon Web Services CLI or SDKs create session and refresh the +// session token automatically to avoid service interruptions when a session +// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) // . // -// You can use either a canned ACL or specify access permissions explicitly. You -// cannot do both. Server-Side- Encryption-Specific Request Headers Amazon S3 -// encrypts data by using server-side encryption with an Amazon S3 managed key -// (SSE-S3) by default. Server-side encryption is for data encryption at rest. -// Amazon S3 encrypts your data as it writes it to disks in its data centers and -// decrypts it when you access it. You can request that Amazon S3 encrypts data at -// rest by using server-side encryption with other key options. The option you use -// depends on whether you want to use KMS keys (SSE-KMS) or provide your own -// encryption keys (SSE-C). +// Encryption +// - General purpose buckets - Server-side encryption is for data encryption at +// rest. Amazon S3 encrypts your data as it writes it to disks in its data centers +// and decrypts it when you access it. Amazon S3 automatically encrypts all new +// objects that are uploaded to an S3 bucket. When doing a multipart upload, if you +// don't specify encryption information in your request, the encryption setting of +// the uploaded parts is set to the default encryption configuration of the +// destination bucket. By default, all buckets have a base level of encryption +// configuration that uses server-side encryption with Amazon S3 managed keys +// (SSE-S3). 
If the destination bucket has a default encryption configuration that +// uses server-side encryption with an Key Management Service (KMS) key (SSE-KMS), +// or a customer-provided encryption key (SSE-C), Amazon S3 uses the corresponding +// KMS key, or a customer-provided key to encrypt the uploaded parts. When you +// perform a CreateMultipartUpload operation, if you want to use a different type +// of encryption setting for the uploaded parts, you can request that Amazon S3 +// encrypts the object with a different encryption key (such as an Amazon S3 +// managed key, a KMS key, or a customer-provided key). When the encryption setting +// in your request is different from the default encryption configuration of the +// destination bucket, the encryption setting in your request takes precedence. If +// you choose to provide your own encryption key, the request headers you provide +// in UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// and UploadPartCopy (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html) +// requests must match the headers you used in the CreateMultipartUpload request. // - Use KMS keys (SSE-KMS) that include the Amazon Web Services managed key ( // aws/s3 ) and KMS customer managed keys stored in Key Management Service (KMS) // – If you want Amazon Web Services to manage the keys used to encrypt data, // specify the following headers in the request. // - x-amz-server-side-encryption // - x-amz-server-side-encryption-aws-kms-key-id -// - x-amz-server-side-encryption-context If you specify -// x-amz-server-side-encryption:aws:kms , but don't provide +// - x-amz-server-side-encryption-context +// - If you specify x-amz-server-side-encryption:aws:kms , but don't provide // x-amz-server-side-encryption-aws-kms-key-id , Amazon S3 uses the Amazon Web -// Services managed key ( aws/s3 key) in KMS to protect the data. All GET and PUT -// requests for an object protected by KMS fail if you don't make them by using -// Secure Sockets Layer (SSL), Transport Layer Security (TLS), or Signature Version -// 4. For more information about server-side encryption with KMS keys (SSE-KMS), -// see Protecting Data Using Server-Side Encryption with KMS keys (https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html) -// . +// Services managed key ( aws/s3 key) in KMS to protect the data. +// - To perform a multipart upload with encryption by using an Amazon Web +// Services KMS key, the requester must have permission to the kms:Decrypt and +// kms:GenerateDataKey* actions on the key. These permissions are required +// because Amazon S3 must decrypt and read data from the encrypted file parts +// before it completes the multipart upload. For more information, see Multipart +// upload API and permissions (https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html#mpuAndPermissions) +// and Protecting data using server-side encryption with Amazon Web Services KMS (https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html) +// in the Amazon S3 User Guide. +// - If your Identity and Access Management (IAM) user or role is in the same +// Amazon Web Services account as the KMS key, then you must have these permissions +// on the key policy. If your IAM user or role is in a different account from the +// key, then you must have the permissions on both the key policy and your IAM user +// or role. 
+// - All GET and PUT requests for an object protected by KMS fail if you don't +// make them by using Secure Sockets Layer (SSL), Transport Layer Security (TLS), +// or Signature Version 4. For information about configuring any of the officially +// supported Amazon Web Services SDKs and Amazon Web Services CLI, see +// Specifying the Signature Version in Request Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version) +// in the Amazon S3 User Guide. For more information about server-side +// encryption with KMS keys (SSE-KMS), see Protecting Data Using Server-Side +// Encryption with KMS keys (https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html) +// in the Amazon S3 User Guide. // - Use customer-provided encryption keys (SSE-C) – If you want to manage your // own encryption keys, provide all the following headers in the request. // - x-amz-server-side-encryption-customer-algorithm @@ -120,55 +134,13 @@ import ( // server-side encryption with customer-provided encryption keys (SSE-C), see // Protecting data using server-side encryption with customer-provided encryption // keys (SSE-C) (https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html) -// . +// in the Amazon S3 User Guide. +// - Directory buckets -For directory buckets, only server-side encryption with +// Amazon S3 managed keys (SSE-S3) ( AES256 ) is supported. // -// Access-Control-List (ACL)-Specific Request Headers You also can use the -// following access control–related headers with this operation. By default, all -// objects are private. Only the owner has full access control. When adding a new -// object, you can grant permissions to individual Amazon Web Services accounts or -// to predefined groups defined by Amazon S3. These permissions are then added to -// the access control list (ACL) on the object. For more information, see Using -// ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html) . -// With this operation, you can grant access permissions using one of the following -// two methods: -// - Specify a canned ACL ( x-amz-acl ) — Amazon S3 supports a set of predefined -// ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and -// permissions. For more information, see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL) -// . -// - Specify access permissions explicitly — To explicitly grant access -// permissions to specific Amazon Web Services accounts or groups, use the -// following headers. Each header maps to specific permissions that Amazon S3 -// supports in an ACL. For more information, see Access Control List (ACL) -// Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) . -// In the header, you specify a list of grantees who get the specific permission. 
-// To grant permissions explicitly, use: -// - x-amz-grant-read -// - x-amz-grant-write -// - x-amz-grant-read-acp -// - x-amz-grant-write-acp -// - x-amz-grant-full-control You specify each grantee as a type=value pair, -// where the type is one of the following: -// - id – if the value specified is the canonical user ID of an Amazon Web -// Services account -// - uri – if you are granting permissions to a predefined group -// - emailAddress – if the value specified is the email address of an Amazon Web -// Services account Using email addresses to specify a grantee is only supported in -// the following Amazon Web Services Regions: -// - US East (N. Virginia) -// - US West (N. California) -// - US West (Oregon) -// - Asia Pacific (Singapore) -// - Asia Pacific (Sydney) -// - Asia Pacific (Tokyo) -// - Europe (Ireland) -// - South America (São Paulo) For a list of all the Amazon S3 supported Regions -// and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) -// in the Amazon Web Services General Reference. For example, the following -// x-amz-grant-read header grants the Amazon Web Services accounts identified by -// account IDs permissions to read object data and its metadata: -// x-amz-grant-read: id="11112222333", id="444455556666" -// -// The following operations are related to CreateMultipartUpload : +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket_name.s3express-az_id.region.amazonaws.com . The following operations are +// related to CreateMultipartUpload : // - UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) // - CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) // - AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) @@ -191,16 +163,26 @@ func (c *Client) CreateMultipartUpload(ctx context.Context, params *CreateMultip type CreateMultipartUploadInput struct { - // The name of the bucket to which to initiate the upload When using this action - // with an access point, you must direct requests to the access point hostname. The - // access point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. When you use this action with Amazon S3 on - // Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on - // Outposts hostname takes the form + // The name of the bucket where the multipart upload is initiated and where the + // object is uploaded. Directory buckets - When you use this operation with a + // directory bucket, you must use virtual-hosted-style requests in the format + // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not + // supported. Directory bucket names must be unique in the chosen Availability + // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for + // example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 ). 
For information about bucket + // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. Access points - When you use this action with an + // access point, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When using the access point ARN, + // you must direct requests to the access point hostname. The access point hostname + // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. Access points and Object Lambda access points are + // not supported by directory buckets. S3 on Outposts - When you use this action + // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts + // hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you // use this action with S3 on Outposts through the Amazon Web Services SDKs, you // provide the Outposts access point ARN in place of the bucket name. For more @@ -215,22 +197,33 @@ type CreateMultipartUploadInput struct { // This member is required. Key *string - // The canned ACL to apply to the object. This action is not supported by Amazon - // S3 on Outposts. + // The canned ACL to apply to the object. Amazon S3 supports a set of predefined + // ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and + // permissions. For more information, see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL) + // in the Amazon S3 User Guide. By default, all objects are private. Only the owner + // has full access control. When uploading an object, you can grant access + // permissions to individual Amazon Web Services accounts or to predefined groups + // defined by Amazon S3. These permissions are then added to the access control + // list (ACL) on the new object. For more information, see Using ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html) + // . One way to grant the permissions using the request headers is to specify a + // canned ACL with the x-amz-acl request header. + // - This functionality is not supported for directory buckets. + // - This functionality is not supported for Amazon S3 on Outposts. ACL types.ObjectCannedACL // Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption // with server-side encryption using Key Management Service (KMS) keys (SSE-KMS). // Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object // encryption with SSE-KMS. Specifying this header with an object action doesn’t - // affect bucket-level settings for S3 Bucket Key. + // affect bucket-level settings for S3 Bucket Key. This functionality is not + // supported for directory buckets. BucketKeyEnabled *bool // Specifies caching behavior along the request/reply chain. CacheControl *string - // Indicates the algorithm you want Amazon S3 to use to create the checksum for - // the object. 
For more information, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Indicates the algorithm that you want Amazon S3 to use to create the checksum + // for the object. For more information, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. ChecksumAlgorithm types.ChecksumAlgorithm @@ -239,108 +232,226 @@ type CreateMultipartUploadInput struct { // Specifies what content encodings have been applied to the object and thus what // decoding mechanisms must be applied to obtain the media-type referenced by the - // Content-Type header field. + // Content-Type header field. For directory buckets, only the aws-chunked value is + // supported in this header field. ContentEncoding *string - // The language the content is in. + // The language that the content is in. ContentLanguage *string // A standard MIME type describing the format of the object data. ContentType *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string // The date and time at which the object is no longer cacheable. Expires *time.Time - // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. This - // action is not supported by Amazon S3 on Outposts. + // Specify access permissions explicitly to give the grantee READ, READ_ACP, and + // WRITE_ACP permissions on the object. By default, all objects are private. Only + // the owner has full access control. When uploading an object, you can use this + // header to explicitly grant access permissions to specific Amazon Web Services + // accounts or groups. This header maps to specific permissions that Amazon S3 + // supports in an ACL. For more information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) + // in the Amazon S3 User Guide. You specify each grantee as a type=value pair, + // where the type is one of the following: + // - id – if the value specified is the canonical user ID of an Amazon Web + // Services account + // - uri – if you are granting permissions to a predefined group + // - emailAddress – if the value specified is the email address of an Amazon Web + // Services account Using email addresses to specify a grantee is only supported in + // the following Amazon Web Services Regions: + // - US East (N. Virginia) + // - US West (N. California) + // - US West (Oregon) + // - Asia Pacific (Singapore) + // - Asia Pacific (Sydney) + // - Asia Pacific (Tokyo) + // - Europe (Ireland) + // - South America (São Paulo) For a list of all the Amazon S3 supported Regions + // and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) + // in the Amazon Web Services General Reference. + // For example, the following x-amz-grant-read header grants the Amazon Web + // Services accounts identified by account IDs permissions to read object data and + // its metadata: x-amz-grant-read: id="11112222333", id="444455556666" + // - This functionality is not supported for directory buckets. 
+ // - This functionality is not supported for Amazon S3 on Outposts. GrantFullControl *string - // Allows grantee to read the object data and its metadata. This action is not - // supported by Amazon S3 on Outposts. + // Specify access permissions explicitly to allow grantee to read the object data + // and its metadata. By default, all objects are private. Only the owner has full + // access control. When uploading an object, you can use this header to explicitly + // grant access permissions to specific Amazon Web Services accounts or groups. + // This header maps to specific permissions that Amazon S3 supports in an ACL. For + // more information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) + // in the Amazon S3 User Guide. You specify each grantee as a type=value pair, + // where the type is one of the following: + // - id – if the value specified is the canonical user ID of an Amazon Web + // Services account + // - uri – if you are granting permissions to a predefined group + // - emailAddress – if the value specified is the email address of an Amazon Web + // Services account Using email addresses to specify a grantee is only supported in + // the following Amazon Web Services Regions: + // - US East (N. Virginia) + // - US West (N. California) + // - US West (Oregon) + // - Asia Pacific (Singapore) + // - Asia Pacific (Sydney) + // - Asia Pacific (Tokyo) + // - Europe (Ireland) + // - South America (São Paulo) For a list of all the Amazon S3 supported Regions + // and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) + // in the Amazon Web Services General Reference. + // For example, the following x-amz-grant-read header grants the Amazon Web + // Services accounts identified by account IDs permissions to read object data and + // its metadata: x-amz-grant-read: id="11112222333", id="444455556666" + // - This functionality is not supported for directory buckets. + // - This functionality is not supported for Amazon S3 on Outposts. GrantRead *string - // Allows grantee to read the object ACL. This action is not supported by Amazon - // S3 on Outposts. + // Specify access permissions explicitly to allows grantee to read the object ACL. + // By default, all objects are private. Only the owner has full access control. + // When uploading an object, you can use this header to explicitly grant access + // permissions to specific Amazon Web Services accounts or groups. This header maps + // to specific permissions that Amazon S3 supports in an ACL. For more information, + // see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) + // in the Amazon S3 User Guide. You specify each grantee as a type=value pair, + // where the type is one of the following: + // - id – if the value specified is the canonical user ID of an Amazon Web + // Services account + // - uri – if you are granting permissions to a predefined group + // - emailAddress – if the value specified is the email address of an Amazon Web + // Services account Using email addresses to specify a grantee is only supported in + // the following Amazon Web Services Regions: + // - US East (N. Virginia) + // - US West (N. 
California) + // - US West (Oregon) + // - Asia Pacific (Singapore) + // - Asia Pacific (Sydney) + // - Asia Pacific (Tokyo) + // - Europe (Ireland) + // - South America (São Paulo) For a list of all the Amazon S3 supported Regions + // and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) + // in the Amazon Web Services General Reference. + // For example, the following x-amz-grant-read header grants the Amazon Web + // Services accounts identified by account IDs permissions to read object data and + // its metadata: x-amz-grant-read: id="11112222333", id="444455556666" + // - This functionality is not supported for directory buckets. + // - This functionality is not supported for Amazon S3 on Outposts. GrantReadACP *string - // Allows grantee to write the ACL for the applicable object. This action is not - // supported by Amazon S3 on Outposts. + // Specify access permissions explicitly to allows grantee to allow grantee to + // write the ACL for the applicable object. By default, all objects are private. + // Only the owner has full access control. When uploading an object, you can use + // this header to explicitly grant access permissions to specific Amazon Web + // Services accounts or groups. This header maps to specific permissions that + // Amazon S3 supports in an ACL. For more information, see Access Control List + // (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) + // in the Amazon S3 User Guide. You specify each grantee as a type=value pair, + // where the type is one of the following: + // - id – if the value specified is the canonical user ID of an Amazon Web + // Services account + // - uri – if you are granting permissions to a predefined group + // - emailAddress – if the value specified is the email address of an Amazon Web + // Services account Using email addresses to specify a grantee is only supported in + // the following Amazon Web Services Regions: + // - US East (N. Virginia) + // - US West (N. California) + // - US West (Oregon) + // - Asia Pacific (Singapore) + // - Asia Pacific (Sydney) + // - Asia Pacific (Tokyo) + // - Europe (Ireland) + // - South America (São Paulo) For a list of all the Amazon S3 supported Regions + // and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) + // in the Amazon Web Services General Reference. + // For example, the following x-amz-grant-read header grants the Amazon Web + // Services accounts identified by account IDs permissions to read object data and + // its metadata: x-amz-grant-read: id="11112222333", id="444455556666" + // - This functionality is not supported for directory buckets. + // - This functionality is not supported for Amazon S3 on Outposts. GrantWriteACP *string // A map of metadata to store with the object in S3. Metadata map[string]string - // Specifies whether you want to apply a legal hold to the uploaded object. + // Specifies whether you want to apply a legal hold to the uploaded object. This + // functionality is not supported for directory buckets. ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus // Specifies the Object Lock mode that you want to apply to the uploaded object. + // This functionality is not supported for directory buckets. ObjectLockMode types.ObjectLockMode - // Specifies the date and time when you want the Object Lock to expire. + // Specifies the date and time when you want the Object Lock to expire. 
This + // functionality is not supported for directory buckets. ObjectLockRetainUntilDate *time.Time // Confirms that the requester knows that they will be charged for the request. // Bucket owners need not specify this parameter in their requests. If either the - // source or destination Amazon S3 bucket has Requester Pays enabled, the requester - // will pay for corresponding charges to copy the object. For information about + // source or destination S3 bucket has Requester Pays enabled, the requester will + // pay for corresponding charges to copy the object. For information about // downloading objects from Requester Pays buckets, see Downloading Objects in // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. RequestPayer types.RequestPayer - // Specifies the algorithm to use to when encrypting the object (for example, - // AES256). + // Specifies the algorithm to use when encrypting the object (for example, + // AES256). This functionality is not supported for directory buckets. SSECustomerAlgorithm *string // Specifies the customer-provided encryption key for Amazon S3 to use in // encrypting data. This value is used to store the object and then it is // discarded; Amazon S3 does not store the encryption key. The key must be // appropriate for use with the algorithm specified in the - // x-amz-server-side-encryption-customer-algorithm header. + // x-amz-server-side-encryption-customer-algorithm header. This functionality is + // not supported for directory buckets. SSECustomerKey *string - // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure that the - // encryption key was transmitted without error. + // Specifies the 128-bit MD5 digest of the customer-provided encryption key + // according to RFC 1321. Amazon S3 uses this header for a message integrity check + // to ensure that the encryption key was transmitted without error. This + // functionality is not supported for directory buckets. SSECustomerKeyMD5 *string // Specifies the Amazon Web Services KMS Encryption Context to use for object // encryption. The value of this header is a base64-encoded UTF-8 string holding - // JSON with the encryption context key-value pairs. + // JSON with the encryption context key-value pairs. This functionality is not + // supported for directory buckets. SSEKMSEncryptionContext *string // Specifies the ID (Key ID, Key ARN, or Key Alias) of the symmetric encryption - // customer managed key to use for object encryption. All GET and PUT requests for - // an object protected by KMS will fail if they're not made via SSL or using SigV4. - // For information about configuring any of the officially supported Amazon Web - // Services SDKs and Amazon Web Services CLI, see Specifying the Signature Version - // in Request Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version) - // in the Amazon S3 User Guide. + // customer managed key to use for object encryption. This functionality is not + // supported for directory buckets. SSEKMSKeyId *string - // The server-side encryption algorithm used when storing this object in Amazon S3 - // (for example, AES256 , aws:kms ). 
+ // The server-side encryption algorithm used when you store this object in Amazon + // S3 (for example, AES256 , aws:kms ). For directory buckets, only server-side + // encryption with Amazon S3 managed keys (SSE-S3) ( AES256 ) is supported. ServerSideEncryption types.ServerSideEncryption // By default, Amazon S3 uses the STANDARD Storage Class to store newly created // objects. The STANDARD storage class provides high durability and high // availability. Depending on performance needs, you can specify a different - // Storage Class. Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For - // more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) + // Storage Class. For more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) // in the Amazon S3 User Guide. + // - For directory buckets, only the S3 Express One Zone storage class is + // supported to store newly created objects. + // - Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. StorageClass types.StorageClass - // The tag-set for the object. The tag-set must be encoded as URL Query parameters. + // The tag-set for the object. The tag-set must be encoded as URL Query + // parameters. This functionality is not supported for directory buckets. Tagging *string // If the bucket is configured as a website, redirects requests for this object to // another object in the same bucket or to an external URL. Amazon S3 stores the - // value of this header in the object metadata. + // value of this header in the object metadata. This functionality is not supported + // for directory buckets. WebsiteRedirectLocation *string noSmithyDocumentSerde @@ -360,35 +471,25 @@ type CreateMultipartUploadOutput struct { // indicates when the initiated multipart upload becomes eligible for an abort // operation. For more information, see Aborting Incomplete Multipart Uploads // Using a Bucket Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) - // . The response also includes the x-amz-abort-rule-id header that provides the - // ID of the lifecycle configuration rule that defines this action. + // in the Amazon S3 User Guide. The response also includes the x-amz-abort-rule-id + // header that provides the ID of the lifecycle configuration rule that defines the + // abort action. This functionality is not supported for directory buckets. AbortDate *time.Time // This header is returned along with the x-amz-abort-date header. It identifies // the applicable lifecycle configuration rule that defines the action to abort - // incomplete multipart uploads. + // incomplete multipart uploads. This functionality is not supported for directory + // buckets. AbortRuleId *string // The name of the bucket to which the multipart upload was initiated. Does not - // return the access point ARN or access point alias if used. When using this - // action with an access point, you must direct requests to the access point - // hostname. The access point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. 
For more information about - // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. When you use this action with Amazon S3 on - // Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on - // Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you - // use this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // return the access point ARN or access point alias if used. Access points are not + // supported by directory buckets. Bucket *string // Indicates whether the multipart upload uses an S3 Bucket Key for server-side - // encryption with Key Management Service (KMS) keys (SSE-KMS). + // encryption with Key Management Service (KMS) keys (SSE-KMS). This functionality + // is not supported for directory buckets. BucketKeyEnabled *bool // The algorithm that was used to create a checksum of the object. @@ -398,30 +499,35 @@ type CreateMultipartUploadOutput struct { Key *string // If present, indicates that the requester was successfully charged for the - // request. + // request. This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // If server-side encryption with a customer-provided encryption key was - // requested, the response will include this header confirming the encryption - // algorithm used. + // requested, the response will include this header to confirm the encryption + // algorithm that's used. This functionality is not supported for directory + // buckets. SSECustomerAlgorithm *string // If server-side encryption with a customer-provided encryption key was - // requested, the response will include this header to provide round-trip message - // integrity verification of the customer-provided encryption key. + // requested, the response will include this header to provide the round-trip + // message integrity verification of the customer-provided encryption key. This + // functionality is not supported for directory buckets. SSECustomerKeyMD5 *string - // If present, specifies the Amazon Web Services KMS Encryption Context to use for + // If present, indicates the Amazon Web Services KMS Encryption Context to use for // object encryption. The value of this header is a base64-encoded UTF-8 string - // holding JSON with the encryption context key-value pairs. + // holding JSON with the encryption context key-value pairs. This functionality is + // not supported for directory buckets. SSEKMSEncryptionContext *string - // If present, specifies the ID of the Key Management Service (KMS) symmetric - // encryption customer managed key that was used for the object. + // If present, indicates the ID of the Key Management Service (KMS) symmetric + // encryption customer managed key that was used for the object. This functionality + // is not supported for directory buckets. SSEKMSKeyId *string - // The server-side encryption algorithm used when storing this object in Amazon S3 - // (for example, AES256 , aws:kms ). + // The server-side encryption algorithm used when you store this object in Amazon + // S3 (for example, AES256 , aws:kms ). 
For directory buckets, only server-side + // encryption with Amazon S3 managed keys (SSE-S3) ( AES256 ) is supported. ServerSideEncryption types.ServerSideEncryption // ID for the initiated multipart upload. @@ -488,6 +594,9 @@ func (c *Client) addOperationCreateMultipartUploadMiddlewares(stack *middleware. if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = addOpCreateMultipartUploadValidationMiddleware(stack); err != nil { return err } @@ -521,6 +630,9 @@ func (c *Client) addOperationCreateMultipartUploadMiddlewares(stack *middleware. if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSetCreateMPUChecksumAlgorithm(stack); err != nil { + return err + } return nil } diff --git a/service/s3/api_op_CreateSession.go b/service/s3/api_op_CreateSession.go new file mode 100644 index 00000000000..1cb1f151296 --- /dev/null +++ b/service/s3/api_op_CreateSession.go @@ -0,0 +1,260 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Creates a session that establishes temporary security credentials to support +// fast authentication and authorization for the Zonal endpoint APIs on directory +// buckets. For more information about Zonal endpoint APIs that include the +// Availability Zone in the request endpoint, see S3 Express One Zone APIs (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-APIs.html) +// in the Amazon S3 User Guide. To make Zonal endpoint API requests on a directory +// bucket, use the CreateSession API operation. Specifically, you grant +// s3express:CreateSession permission to a bucket in a bucket policy or an IAM +// identity-based policy. Then, you use IAM credentials to make the CreateSession +// API request on the bucket, which returns temporary security credentials that +// include the access key ID, secret access key, session token, and expiration. +// These credentials have associated permissions to access the Zonal endpoint APIs. +// After the session is created, you don’t need to use other policies to grant +// permissions to each Zonal endpoint API individually. Instead, in your Zonal +// endpoint API requests, you sign your requests by applying the temporary security +// credentials of the session to the request headers and following the SigV4 +// protocol for authentication. You also apply the session token to the +// x-amz-s3session-token request header for authorization. Temporary security +// credentials are scoped to the bucket and expire after 5 minutes. After the +// expiration time, any calls that you make with those credentials will fail. You +// must use IAM credentials again to make a CreateSession API request that +// generates a new set of temporary credentials for use. Temporary credentials +// cannot be extended or refreshed beyond the original specified interval. If you +// use Amazon Web Services SDKs, SDKs handle the session token refreshes +// automatically to avoid service interruptions when a session expires. 
We +// recommend that you use the Amazon Web Services SDKs to initiate and manage +// requests to the CreateSession API. For more information, see Performance +// guidelines and design patterns (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-optimizing-performance-guidelines-design-patterns.html#s3-express-optimizing-performance-session-authentication) +// in the Amazon S3 User Guide. +// - You must make requests for this API operation to the Zonal endpoint. These +// endpoints support virtual-hosted-style requests in the format +// https://bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests +// are not supported. For more information, see Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. +// - CopyObject API operation - Unlike other Zonal endpoint APIs, the CopyObject +// API operation doesn't use the temporary security credentials returned from the +// CreateSession API operation for authentication and authorization. For +// information about authentication and authorization of the CopyObject API +// operation on directory buckets, see CopyObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html) +// . +// - HeadBucket API operation - Unlike other Zonal endpoint APIs, the HeadBucket +// API operation doesn't use the temporary security credentials returned from the +// CreateSession API operation for authentication and authorization. For +// information about authentication and authorization of the HeadBucket API +// operation on directory buckets, see HeadBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadBucket.html) +// . +// +// Permissions To obtain temporary security credentials, you must create a bucket +// policy or an IAM identity-based policy that grants s3express:CreateSession +// permission to the bucket. In a policy, you can have the s3express:SessionMode +// condition key to control who can create a ReadWrite or ReadOnly session. For +// more information about ReadWrite or ReadOnly sessions, see +// x-amz-create-session-mode (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html#API_CreateSession_RequestParameters) +// . For example policies, see Example bucket policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html) +// and Amazon Web Services Identity and Access Management (IAM) identity-based +// policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html) +// in the Amazon S3 User Guide. To grant cross-account access to Zonal endpoint +// APIs, the bucket policy should also grant both accounts the +// s3express:CreateSession permission. HTTP Host header syntax Directory buckets - +// The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com . +func (c *Client) CreateSession(ctx context.Context, params *CreateSessionInput, optFns ...func(*Options)) (*CreateSessionOutput, error) { + if params == nil { + params = &CreateSessionInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "CreateSession", params, optFns, c.addOperationCreateSessionMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*CreateSessionOutput) + out.ResultMetadata = metadata + return out, nil +} + +type CreateSessionInput struct { + + // The name of the bucket that you create a session for. 
+ // + // This member is required. + Bucket *string + + // Specifies the mode of the session that will be created, either ReadWrite or + // ReadOnly . By default, a ReadWrite session is created. A ReadWrite session is + // capable of executing all the Zonal endpoint APIs on a directory bucket. A + // ReadOnly session is constrained to execute the following Zonal endpoint APIs: + // GetObject , HeadObject , ListObjectsV2 , GetObjectAttributes , ListParts , and + // ListMultipartUploads . + SessionMode types.SessionMode + + noSmithyDocumentSerde +} + +func (in *CreateSessionInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.DisableS3ExpressSessionAuth = ptr.Bool(true) +} + +type CreateSessionOutput struct { + + // The established temporary security credentials for the created session. + // + // This member is required. + Credentials *types.SessionCredentials + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationCreateSessionMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpCreateSession{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpCreateSession{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "CreateSession"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addOpCreateSessionValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateSession(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } + if err = addCreateSessionUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack);
err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func (v *CreateSessionInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +func newServiceMetadataMiddleware_opCreateSession(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "CreateSession", + } +} + +// getCreateSessionBucketMember returns a pointer to string denoting a provided +// bucket member value and a boolean indicating if the input has a modeled bucket +// name. +func getCreateSessionBucketMember(input interface{}) (*string, bool) { + in := input.(*CreateSessionInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addCreateSessionUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getCreateSessionBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/service/s3/api_op_DeleteBucket.go b/service/s3/api_op_DeleteBucket.go index 6acd7a0e82d..34645bb9a2a 100644 --- a/service/s3/api_op_DeleteBucket.go +++ b/service/s3/api_op_DeleteBucket.go @@ -9,12 +9,37 @@ import ( "github.com/aws/aws-sdk-go-v2/aws/signer/v4" s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) // Deletes the S3 bucket. All objects (including all object versions and delete // markers) in the bucket must be deleted before the bucket itself can be deleted. -// The following operations are related to DeleteBucket : +// - Directory buckets - If multipart uploads in a directory bucket are in +// progress, you can't delete the bucket until all the in-progress multipart +// uploads are aborted or completed. +// - Directory buckets - For directory buckets, you must make requests for this +// API operation to the Regional endpoint. These endpoints support path-style +// requests in the format +// https://s3express-control.region_code.amazonaws.com/bucket-name . +// Virtual-hosted-style requests aren't supported. For more information, see +// Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. +// +// Permissions +// - General purpose bucket permissions - You must have the s3:DeleteBucket +// permission on the specified bucket in a policy. +// - Directory bucket permissions - You must have the s3express:DeleteBucket +// permission in an IAM identity-based policy instead of a bucket policy. +// Cross-account access to this API operation isn't supported. This operation can +// only be performed by the Amazon Web Services account that owns the resource.
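Editor's note, an illustrative sketch that is not part of the generated diff: a minimal direct call to the new CreateSession operation through the generated client. The bucket name is a placeholder, and the ReadWrite enum value and the Expiration field on types.SessionCredentials are assumptions about the accompanying types package, which this hunk does not show. In normal use the S3 Express auth scheme calls CreateSession and refreshes the credentials automatically, so a direct call like this is mainly useful for inspecting the returned credentials.

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	// Load credentials and region from the environment; the region must contain
	// the directory bucket's Availability Zone.
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Placeholder directory bucket name in the required base_name--az_id--x-s3 format.
	out, err := client.CreateSession(context.TODO(), &s3.CreateSessionInput{
		Bucket:      aws.String("DOC-EXAMPLE-BUCKET--usw2-az2--x-s3"),
		SessionMode: types.SessionModeReadWrite, // assumed enum value; ReadWrite is the default
	})
	if err != nil {
		log.Fatal(err)
	}
	// The credentials are scoped to the bucket and expire after roughly five minutes.
	log.Printf("session expires at %v", aws.ToTime(out.Credentials.Expiration))
}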
For +// more information about directory bucket policies and permissions, see Amazon +// Web Services Identity and Access Management (IAM) for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html) +// in the Amazon S3 User Guide. +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// s3express-control.region.amazonaws.com . The following operations are related to +// DeleteBucket : // - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) // - DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) func (c *Client) DeleteBucket(ctx context.Context, params *DeleteBucketInput, optFns ...func(*Options)) (*DeleteBucketOutput, error) { @@ -34,14 +59,24 @@ func (c *Client) DeleteBucket(ctx context.Context, params *DeleteBucketInput, op type DeleteBucketInput struct { - // Specifies the bucket being deleted. + // Specifies the bucket being deleted. Directory buckets - When you use this + // operation with a directory bucket, you must use path-style requests in the + // format https://s3express-control.region_code.amazonaws.com/bucket-name . + // Virtual-hosted-style requests aren't supported. Directory bucket names must be + // unique in the chosen Availability Zone. Bucket names must also follow the format + // bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 + // ). For information about bucket naming restrictions, see Directory bucket + // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide // // This member is required. Bucket *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). For directory buckets, this header + // is not supported in this API operation. If you specify this header, the request + // fails with the HTTP status code 501 Not Implemented . 
ExpectedBucketOwner *string noSmithyDocumentSerde @@ -49,7 +84,7 @@ type DeleteBucketInput struct { func (in *DeleteBucketInput) bindEndpointParams(p *EndpointParameters) { p.Bucket = in.Bucket - + p.UseS3ExpressControlEndpoint = ptr.Bool(true) } type DeleteBucketOutput struct { @@ -114,6 +149,9 @@ func (c *Client) addOperationDeleteBucketMiddlewares(stack *middleware.Stack, op if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = addOpDeleteBucketValidationMiddleware(stack); err != nil { return err } diff --git a/service/s3/api_op_DeleteBucketAnalyticsConfiguration.go b/service/s3/api_op_DeleteBucketAnalyticsConfiguration.go index 131acae6659..55d8fc1c9e8 100644 --- a/service/s3/api_op_DeleteBucketAnalyticsConfiguration.go +++ b/service/s3/api_op_DeleteBucketAnalyticsConfiguration.go @@ -9,13 +9,15 @@ import ( "github.com/aws/aws-sdk-go-v2/aws/signer/v4" s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Deletes an analytics configuration for the bucket (specified by the analytics -// configuration ID). To use this operation, you must have permissions to perform -// the s3:PutAnalyticsConfiguration action. The bucket owner has this permission -// by default. The bucket owner can grant this permission to others. For more +// This operation is not supported by directory buckets. Deletes an analytics +// configuration for the bucket (specified by the analytics configuration ID). To +// use this operation, you must have permissions to perform the +// s3:PutAnalyticsConfiguration action. The bucket owner has this permission by +// default. The bucket owner can grant this permission to others. For more // information about permissions, see Permissions Related to Bucket Subresource // Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) // and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) @@ -52,9 +54,9 @@ type DeleteBucketAnalyticsConfigurationInput struct { // This member is required. Id *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). 
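Editor's note, an illustrative sketch that is not part of the generated diff, showing the ExpectedBucketOwner behavior described above through the generated client; the helper function and its arguments are hypothetical placeholders.

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// deleteOwnedBucket deletes the named general purpose bucket only if
// ownerAccountID actually owns it; otherwise S3 rejects the request with
// 403 Forbidden. For directory buckets, omit ExpectedBucketOwner entirely,
// since supplying it makes the request fail with 501 Not Implemented.
func deleteOwnedBucket(ctx context.Context, client *s3.Client, bucket, ownerAccountID string) error {
	_, err := client.DeleteBucket(ctx, &s3.DeleteBucketInput{
		Bucket:              aws.String(bucket),
		ExpectedBucketOwner: aws.String(ownerAccountID),
	})
	return err
}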
ExpectedBucketOwner *string noSmithyDocumentSerde @@ -62,7 +64,7 @@ type DeleteBucketAnalyticsConfigurationInput struct { func (in *DeleteBucketAnalyticsConfigurationInput) bindEndpointParams(p *EndpointParameters) { p.Bucket = in.Bucket - + p.UseS3ExpressControlEndpoint = ptr.Bool(true) } type DeleteBucketAnalyticsConfigurationOutput struct { @@ -127,6 +129,9 @@ func (c *Client) addOperationDeleteBucketAnalyticsConfigurationMiddlewares(stack if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = addOpDeleteBucketAnalyticsConfigurationValidationMiddleware(stack); err != nil { return err } diff --git a/service/s3/api_op_DeleteBucketCors.go b/service/s3/api_op_DeleteBucketCors.go index e9ef6474761..9a544922e48 100644 --- a/service/s3/api_op_DeleteBucketCors.go +++ b/service/s3/api_op_DeleteBucketCors.go @@ -9,13 +9,15 @@ import ( "github.com/aws/aws-sdk-go-v2/aws/signer/v4" s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Deletes the cors configuration information set for the bucket. To use this -// operation, you must have permission to perform the s3:PutBucketCORS action. The -// bucket owner has this permission by default and can grant this permission to -// others. For information about cors , see Enabling Cross-Origin Resource Sharing (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) +// This operation is not supported by directory buckets. Deletes the cors +// configuration information set for the bucket. To use this operation, you must +// have permission to perform the s3:PutBucketCORS action. The bucket owner has +// this permission by default and can grant this permission to others. For +// information about cors , see Enabling Cross-Origin Resource Sharing (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) // in the Amazon S3 User Guide. Related Resources // - PutBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketCors.html) // - RESTOPTIONSobject (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTOPTIONSobject.html) @@ -41,9 +43,9 @@ type DeleteBucketCorsInput struct { // This member is required. Bucket *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). 
ExpectedBucketOwner *string noSmithyDocumentSerde @@ -51,7 +53,7 @@ type DeleteBucketCorsInput struct { func (in *DeleteBucketCorsInput) bindEndpointParams(p *EndpointParameters) { p.Bucket = in.Bucket - + p.UseS3ExpressControlEndpoint = ptr.Bool(true) } type DeleteBucketCorsOutput struct { @@ -116,6 +118,9 @@ func (c *Client) addOperationDeleteBucketCorsMiddlewares(stack *middleware.Stack if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = addOpDeleteBucketCorsValidationMiddleware(stack); err != nil { return err } diff --git a/service/s3/api_op_DeleteBucketEncryption.go b/service/s3/api_op_DeleteBucketEncryption.go index 444d76246c5..0ef6f93ec41 100644 --- a/service/s3/api_op_DeleteBucketEncryption.go +++ b/service/s3/api_op_DeleteBucketEncryption.go @@ -9,13 +9,14 @@ import ( "github.com/aws/aws-sdk-go-v2/aws/signer/v4" s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This implementation of the DELETE action resets the default encryption for the -// bucket as server-side encryption with Amazon S3 managed keys (SSE-S3). For -// information about the bucket default encryption feature, see Amazon S3 Bucket -// Default Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) +// This operation is not supported by directory buckets. This implementation of +// the DELETE action resets the default encryption for the bucket as server-side +// encryption with Amazon S3 managed keys (SSE-S3). For information about the +// bucket default encryption feature, see Amazon S3 Bucket Default Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) // in the Amazon S3 User Guide. To use this operation, you must have permissions to // perform the s3:PutEncryptionConfiguration action. The bucket owner has this // permission by default. The bucket owner can grant this permission to others. For @@ -49,9 +50,9 @@ type DeleteBucketEncryptionInput struct { // This member is required. Bucket *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). 
ExpectedBucketOwner *string noSmithyDocumentSerde @@ -59,7 +60,7 @@ type DeleteBucketEncryptionInput struct { func (in *DeleteBucketEncryptionInput) bindEndpointParams(p *EndpointParameters) { p.Bucket = in.Bucket - + p.UseS3ExpressControlEndpoint = ptr.Bool(true) } type DeleteBucketEncryptionOutput struct { @@ -124,6 +125,9 @@ func (c *Client) addOperationDeleteBucketEncryptionMiddlewares(stack *middleware if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = addOpDeleteBucketEncryptionValidationMiddleware(stack); err != nil { return err } diff --git a/service/s3/api_op_DeleteBucketIntelligentTieringConfiguration.go b/service/s3/api_op_DeleteBucketIntelligentTieringConfiguration.go index 1566d84ff83..eeaefc5f299 100644 --- a/service/s3/api_op_DeleteBucketIntelligentTieringConfiguration.go +++ b/service/s3/api_op_DeleteBucketIntelligentTieringConfiguration.go @@ -9,11 +9,13 @@ import ( "github.com/aws/aws-sdk-go-v2/aws/signer/v4" s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Deletes the S3 Intelligent-Tiering configuration from the specified bucket. The -// S3 Intelligent-Tiering storage class is designed to optimize storage costs by +// This operation is not supported by directory buckets. Deletes the S3 +// Intelligent-Tiering configuration from the specified bucket. The S3 +// Intelligent-Tiering storage class is designed to optimize storage costs by // automatically moving data to the most cost-effective storage access tier, // without performance impact or operational overhead. S3 Intelligent-Tiering // delivers automatic cost savings in three low latency and high throughput access @@ -63,7 +65,7 @@ type DeleteBucketIntelligentTieringConfigurationInput struct { func (in *DeleteBucketIntelligentTieringConfigurationInput) bindEndpointParams(p *EndpointParameters) { p.Bucket = in.Bucket - + p.UseS3ExpressControlEndpoint = ptr.Bool(true) } type DeleteBucketIntelligentTieringConfigurationOutput struct { @@ -128,6 +130,9 @@ func (c *Client) addOperationDeleteBucketIntelligentTieringConfigurationMiddlewa if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = addOpDeleteBucketIntelligentTieringConfigurationValidationMiddleware(stack); err != nil { return err } diff --git a/service/s3/api_op_DeleteBucketInventoryConfiguration.go b/service/s3/api_op_DeleteBucketInventoryConfiguration.go index 67bb38df49c..28a93e0dcb0 100644 --- a/service/s3/api_op_DeleteBucketInventoryConfiguration.go +++ b/service/s3/api_op_DeleteBucketInventoryConfiguration.go @@ -9,15 +9,16 @@ import ( "github.com/aws/aws-sdk-go-v2/aws/signer/v4" s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Deletes an inventory configuration (identified by the inventory ID) from the -// bucket. To use this operation, you must have permissions to perform the -// s3:PutInventoryConfiguration action. The bucket owner has this permission by -// default. The bucket owner can grant this permission to others. 
For more -// information about permissions, see Permissions Related to Bucket Subresource -// Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// This operation is not supported by directory buckets. Deletes an inventory +// configuration (identified by the inventory ID) from the bucket. To use this +// operation, you must have permissions to perform the s3:PutInventoryConfiguration +// action. The bucket owner has this permission by default. The bucket owner can +// grant this permission to others. For more information about permissions, see +// Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) // and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) // . For information about the Amazon S3 inventory feature, see Amazon S3 Inventory (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html) // . Operations related to DeleteBucketInventoryConfiguration include: @@ -51,9 +52,9 @@ type DeleteBucketInventoryConfigurationInput struct { // This member is required. Id *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string noSmithyDocumentSerde @@ -61,7 +62,7 @@ type DeleteBucketInventoryConfigurationInput struct { func (in *DeleteBucketInventoryConfigurationInput) bindEndpointParams(p *EndpointParameters) { p.Bucket = in.Bucket - + p.UseS3ExpressControlEndpoint = ptr.Bool(true) } type DeleteBucketInventoryConfigurationOutput struct { @@ -126,6 +127,9 @@ func (c *Client) addOperationDeleteBucketInventoryConfigurationMiddlewares(stack if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = addOpDeleteBucketInventoryConfigurationValidationMiddleware(stack); err != nil { return err } diff --git a/service/s3/api_op_DeleteBucketLifecycle.go b/service/s3/api_op_DeleteBucketLifecycle.go index d87964d4a3d..d7b8eb58178 100644 --- a/service/s3/api_op_DeleteBucketLifecycle.go +++ b/service/s3/api_op_DeleteBucketLifecycle.go @@ -9,17 +9,19 @@ import ( "github.com/aws/aws-sdk-go-v2/aws/signer/v4" s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Deletes the lifecycle configuration from the specified bucket. Amazon S3 -// removes all the lifecycle configuration rules in the lifecycle subresource -// associated with the bucket. Your objects never expire, and Amazon S3 no longer -// automatically deletes any objects on the basis of rules contained in the deleted -// lifecycle configuration. To use this operation, you must have permission to -// perform the s3:PutLifecycleConfiguration action. By default, the bucket owner -// has this permission and the bucket owner can grant this permission to others. 
-// There is usually some time lag before lifecycle configuration deletion is fully +// This operation is not supported by directory buckets. Deletes the lifecycle +// configuration from the specified bucket. Amazon S3 removes all the lifecycle +// configuration rules in the lifecycle subresource associated with the bucket. +// Your objects never expire, and Amazon S3 no longer automatically deletes any +// objects on the basis of rules contained in the deleted lifecycle configuration. +// To use this operation, you must have permission to perform the +// s3:PutLifecycleConfiguration action. By default, the bucket owner has this +// permission and the bucket owner can grant this permission to others. There is +// usually some time lag before lifecycle configuration deletion is fully // propagated to all the Amazon S3 systems. For more information about the object // expiration, see Elements to Describe Lifecycle Actions (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#intro-lifecycle-rules-actions) // . Related actions include: @@ -47,9 +49,9 @@ type DeleteBucketLifecycleInput struct { // This member is required. Bucket *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string noSmithyDocumentSerde @@ -57,7 +59,7 @@ type DeleteBucketLifecycleInput struct { func (in *DeleteBucketLifecycleInput) bindEndpointParams(p *EndpointParameters) { p.Bucket = in.Bucket - + p.UseS3ExpressControlEndpoint = ptr.Bool(true) } type DeleteBucketLifecycleOutput struct { @@ -122,6 +124,9 @@ func (c *Client) addOperationDeleteBucketLifecycleMiddlewares(stack *middleware. if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = addOpDeleteBucketLifecycleValidationMiddleware(stack); err != nil { return err } diff --git a/service/s3/api_op_DeleteBucketMetricsConfiguration.go b/service/s3/api_op_DeleteBucketMetricsConfiguration.go index f9c8d6be6d2..a6675a8801f 100644 --- a/service/s3/api_op_DeleteBucketMetricsConfiguration.go +++ b/service/s3/api_op_DeleteBucketMetricsConfiguration.go @@ -9,16 +9,18 @@ import ( "github.com/aws/aws-sdk-go-v2/aws/signer/v4" s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Deletes a metrics configuration for the Amazon CloudWatch request metrics -// (specified by the metrics configuration ID) from the bucket. Note that this -// doesn't include the daily storage metrics. To use this operation, you must have -// permissions to perform the s3:PutMetricsConfiguration action. The bucket owner -// has this permission by default. The bucket owner can grant this permission to -// others. For more information about permissions, see Permissions Related to -// Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// This operation is not supported by directory buckets. 
Deletes a metrics +// configuration for the Amazon CloudWatch request metrics (specified by the +// metrics configuration ID) from the bucket. Note that this doesn't include the +// daily storage metrics. To use this operation, you must have permissions to +// perform the s3:PutMetricsConfiguration action. The bucket owner has this +// permission by default. The bucket owner can grant this permission to others. For +// more information about permissions, see Permissions Related to Bucket +// Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) // and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) // . For information about CloudWatch request metrics for Amazon S3, see // Monitoring Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html) @@ -55,9 +57,9 @@ type DeleteBucketMetricsConfigurationInput struct { // This member is required. Id *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string noSmithyDocumentSerde @@ -65,7 +67,7 @@ type DeleteBucketMetricsConfigurationInput struct { func (in *DeleteBucketMetricsConfigurationInput) bindEndpointParams(p *EndpointParameters) { p.Bucket = in.Bucket - + p.UseS3ExpressControlEndpoint = ptr.Bool(true) } type DeleteBucketMetricsConfigurationOutput struct { @@ -130,6 +132,9 @@ func (c *Client) addOperationDeleteBucketMetricsConfigurationMiddlewares(stack * if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = addOpDeleteBucketMetricsConfigurationValidationMiddleware(stack); err != nil { return err } diff --git a/service/s3/api_op_DeleteBucketOwnershipControls.go b/service/s3/api_op_DeleteBucketOwnershipControls.go index 0000e6cf8ad..1b53e08e9bf 100644 --- a/service/s3/api_op_DeleteBucketOwnershipControls.go +++ b/service/s3/api_op_DeleteBucketOwnershipControls.go @@ -9,12 +9,14 @@ import ( "github.com/aws/aws-sdk-go-v2/aws/signer/v4" s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Removes OwnershipControls for an Amazon S3 bucket. To use this operation, you -// must have the s3:PutBucketOwnershipControls permission. For more information -// about Amazon S3 permissions, see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) +// This operation is not supported by directory buckets. Removes OwnershipControls +// for an Amazon S3 bucket. To use this operation, you must have the +// s3:PutBucketOwnershipControls permission. For more information about Amazon S3 +// permissions, see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) // . 
For information about Amazon S3 Object Ownership, see Using Object Ownership (https://docs.aws.amazon.com/AmazonS3/latest/dev/about-object-ownership.html) // . The following operations are related to DeleteBucketOwnershipControls : // - GetBucketOwnershipControls @@ -41,9 +43,9 @@ type DeleteBucketOwnershipControlsInput struct { // This member is required. Bucket *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string noSmithyDocumentSerde @@ -51,7 +53,7 @@ type DeleteBucketOwnershipControlsInput struct { func (in *DeleteBucketOwnershipControlsInput) bindEndpointParams(p *EndpointParameters) { p.Bucket = in.Bucket - + p.UseS3ExpressControlEndpoint = ptr.Bool(true) } type DeleteBucketOwnershipControlsOutput struct { @@ -116,6 +118,9 @@ func (c *Client) addOperationDeleteBucketOwnershipControlsMiddlewares(stack *mid if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = addOpDeleteBucketOwnershipControlsValidationMiddleware(stack); err != nil { return err } diff --git a/service/s3/api_op_DeleteBucketPolicy.go b/service/s3/api_op_DeleteBucketPolicy.go index 3874598e9e0..0d8e77ff734 100644 --- a/service/s3/api_op_DeleteBucketPolicy.go +++ b/service/s3/api_op_DeleteBucketPolicy.go @@ -9,26 +9,46 @@ import ( "github.com/aws/aws-sdk-go-v2/aws/signer/v4" s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This implementation of the DELETE action uses the policy subresource to delete -// the policy of a specified bucket. If you are using an identity other than the -// root user of the Amazon Web Services account that owns the bucket, the calling -// identity must have the DeleteBucketPolicy permissions on the specified bucket -// and belong to the bucket owner's account to use this operation. If you don't -// have DeleteBucketPolicy permissions, Amazon S3 returns a 403 Access Denied -// error. If you have the correct permissions, but you're not using an identity -// that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not -// Allowed error. To ensure that bucket owners don't inadvertently lock themselves -// out of their own buckets, the root principal in a bucket owner's Amazon Web -// Services account can perform the GetBucketPolicy , PutBucketPolicy , and -// DeleteBucketPolicy API actions, even if their bucket policy explicitly denies -// the root principal's access. Bucket owner root principals can only be blocked -// from performing these API actions by VPC endpoint policies and Amazon Web -// Services Organizations policies. For more information about bucket policies, see -// Using Bucket Policies and UserPolicies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html) -// . The following operations are related to DeleteBucketPolicy +// Deletes the policy of a specified bucket. Directory buckets - For directory +// buckets, you must make requests for this API operation to the Regional endpoint. 
+// These endpoints support path-style requests in the format +// https://s3express-control.region_code.amazonaws.com/bucket-name . +// Virtual-hosted-style requests aren't supported. For more information, see +// Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. Permissions If you are using an identity other than +// the root user of the Amazon Web Services account that owns the bucket, the +// calling identity must both have the DeleteBucketPolicy permissions on the +// specified bucket and belong to the bucket owner's account in order to use this +// operation. If you don't have DeleteBucketPolicy permissions, Amazon S3 returns +// a 403 Access Denied error. If you have the correct permissions, but you're not +// using an identity that belongs to the bucket owner's account, Amazon S3 returns +// a 405 Method Not Allowed error. To ensure that bucket owners don't +// inadvertently lock themselves out of their own buckets, the root principal in a +// bucket owner's Amazon Web Services account can perform the GetBucketPolicy , +// PutBucketPolicy , and DeleteBucketPolicy API actions, even if their bucket +// policy explicitly denies the root principal's access. Bucket owner root +// principals can only be blocked from performing these API actions by VPC endpoint +// policies and Amazon Web Services Organizations policies. +// - General purpose bucket permissions - The s3:DeleteBucketPolicy permission is +// required in a policy. For more information about general purpose buckets bucket +// policies, see Using Bucket Policies and User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html) +// in the Amazon S3 User Guide. +// - Directory bucket permissions - To grant access to this API operation, you +// must have the s3express:DeleteBucketPolicy permission in an IAM identity-based +// policy instead of a bucket policy. Cross-account access to this API operation +// isn't supported. This operation can only be performed by the Amazon Web Services +// account that owns the resource. For more information about directory bucket +// policies and permissions, see Amazon Web Services Identity and Access +// Management (IAM) for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html) +// in the Amazon S3 User Guide. +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// s3express-control.region.amazonaws.com . The following operations are related to +// DeleteBucketPolicy // - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) // - DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) func (c *Client) DeleteBucketPolicy(ctx context.Context, params *DeleteBucketPolicyInput, optFns ...func(*Options)) (*DeleteBucketPolicyOutput, error) { @@ -48,14 +68,24 @@ func (c *Client) DeleteBucketPolicy(ctx context.Context, params *DeleteBucketPol type DeleteBucketPolicyInput struct { - // The bucket name. + // The bucket name. Directory buckets - When you use this operation with a + // directory bucket, you must use path-style requests in the format + // https://s3express-control.region_code.amazonaws.com/bucket-name . + // Virtual-hosted-style requests aren't supported. Directory bucket names must be + // unique in the chosen Availability Zone. 
Bucket names must also follow the format + // bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 + // ). For information about bucket naming restrictions, see Directory bucket + // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide // // This member is required. Bucket *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). For directory buckets, this header + // is not supported in this API operation. If you specify this header, the request + // fails with the HTTP status code 501 Not Implemented . ExpectedBucketOwner *string noSmithyDocumentSerde @@ -63,7 +93,7 @@ type DeleteBucketPolicyInput struct { func (in *DeleteBucketPolicyInput) bindEndpointParams(p *EndpointParameters) { p.Bucket = in.Bucket - + p.UseS3ExpressControlEndpoint = ptr.Bool(true) } type DeleteBucketPolicyOutput struct { @@ -128,6 +158,9 @@ func (c *Client) addOperationDeleteBucketPolicyMiddlewares(stack *middleware.Sta if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = addOpDeleteBucketPolicyValidationMiddleware(stack); err != nil { return err } diff --git a/service/s3/api_op_DeleteBucketReplication.go b/service/s3/api_op_DeleteBucketReplication.go index bd12eecf291..7ac11a7ba9a 100644 --- a/service/s3/api_op_DeleteBucketReplication.go +++ b/service/s3/api_op_DeleteBucketReplication.go @@ -9,14 +9,15 @@ import ( "github.com/aws/aws-sdk-go-v2/aws/signer/v4" s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Deletes the replication configuration from the bucket. To use this operation, -// you must have permissions to perform the s3:PutReplicationConfiguration action. -// The bucket owner has these permissions by default and can grant it to others. -// For more information about permissions, see Permissions Related to Bucket -// Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// This operation is not supported by directory buckets. Deletes the replication +// configuration from the bucket. To use this operation, you must have permissions +// to perform the s3:PutReplicationConfiguration action. The bucket owner has +// these permissions by default and can grant it to others. For more information +// about permissions, see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) // and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) // . It can take a while for the deletion of a replication configuration to fully // propagate. 
For information about replication configuration, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) @@ -46,9 +47,9 @@ type DeleteBucketReplicationInput struct { // This member is required. Bucket *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string noSmithyDocumentSerde @@ -56,7 +57,7 @@ type DeleteBucketReplicationInput struct { func (in *DeleteBucketReplicationInput) bindEndpointParams(p *EndpointParameters) { p.Bucket = in.Bucket - + p.UseS3ExpressControlEndpoint = ptr.Bool(true) } type DeleteBucketReplicationOutput struct { @@ -121,6 +122,9 @@ func (c *Client) addOperationDeleteBucketReplicationMiddlewares(stack *middlewar if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = addOpDeleteBucketReplicationValidationMiddleware(stack); err != nil { return err } diff --git a/service/s3/api_op_DeleteBucketTagging.go b/service/s3/api_op_DeleteBucketTagging.go index a9306784f48..5a75a1f4ac4 100644 --- a/service/s3/api_op_DeleteBucketTagging.go +++ b/service/s3/api_op_DeleteBucketTagging.go @@ -9,13 +9,15 @@ import ( "github.com/aws/aws-sdk-go-v2/aws/signer/v4" s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Deletes the tags from the bucket. To use this operation, you must have -// permission to perform the s3:PutBucketTagging action. By default, the bucket -// owner has this permission and can grant this permission to others. The following -// operations are related to DeleteBucketTagging : +// This operation is not supported by directory buckets. Deletes the tags from the +// bucket. To use this operation, you must have permission to perform the +// s3:PutBucketTagging action. By default, the bucket owner has this permission and +// can grant this permission to others. The following operations are related to +// DeleteBucketTagging : // - GetBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html) // - PutBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html) func (c *Client) DeleteBucketTagging(ctx context.Context, params *DeleteBucketTaggingInput, optFns ...func(*Options)) (*DeleteBucketTaggingOutput, error) { @@ -40,9 +42,9 @@ type DeleteBucketTaggingInput struct { // This member is required. Bucket *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). 
ExpectedBucketOwner *string noSmithyDocumentSerde @@ -50,7 +52,7 @@ type DeleteBucketTaggingInput struct { func (in *DeleteBucketTaggingInput) bindEndpointParams(p *EndpointParameters) { p.Bucket = in.Bucket - + p.UseS3ExpressControlEndpoint = ptr.Bool(true) } type DeleteBucketTaggingOutput struct { @@ -115,6 +117,9 @@ func (c *Client) addOperationDeleteBucketTaggingMiddlewares(stack *middleware.St if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = addOpDeleteBucketTaggingValidationMiddleware(stack); err != nil { return err } diff --git a/service/s3/api_op_DeleteBucketWebsite.go b/service/s3/api_op_DeleteBucketWebsite.go index 9eccc2465ef..dbe84fbb9fe 100644 --- a/service/s3/api_op_DeleteBucketWebsite.go +++ b/service/s3/api_op_DeleteBucketWebsite.go @@ -9,20 +9,21 @@ import ( "github.com/aws/aws-sdk-go-v2/aws/signer/v4" s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This action removes the website configuration for a bucket. Amazon S3 returns a -// 200 OK response upon successfully deleting a website configuration on the -// specified bucket. You will get a 200 OK response if the website configuration -// you are trying to delete does not exist on the bucket. Amazon S3 returns a 404 -// response if the bucket specified in the request does not exist. This DELETE -// action requires the S3:DeleteBucketWebsite permission. By default, only the -// bucket owner can delete the website configuration attached to a bucket. However, -// bucket owners can grant other users permission to delete the website -// configuration by writing a bucket policy granting them the -// S3:DeleteBucketWebsite permission. For more information about hosting websites, -// see Hosting Websites on Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html) +// This operation is not supported by directory buckets. This action removes the +// website configuration for a bucket. Amazon S3 returns a 200 OK response upon +// successfully deleting a website configuration on the specified bucket. You will +// get a 200 OK response if the website configuration you are trying to delete +// does not exist on the bucket. Amazon S3 returns a 404 response if the bucket +// specified in the request does not exist. This DELETE action requires the +// S3:DeleteBucketWebsite permission. By default, only the bucket owner can delete +// the website configuration attached to a bucket. However, bucket owners can grant +// other users permission to delete the website configuration by writing a bucket +// policy granting them the S3:DeleteBucketWebsite permission. For more +// information about hosting websites, see Hosting Websites on Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html) // . The following operations are related to DeleteBucketWebsite : // - GetBucketWebsite (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketWebsite.html) // - PutBucketWebsite (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketWebsite.html) @@ -48,9 +49,9 @@ type DeleteBucketWebsiteInput struct { // This member is required. Bucket *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). 
+ // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string noSmithyDocumentSerde @@ -58,7 +59,7 @@ type DeleteBucketWebsiteInput struct { func (in *DeleteBucketWebsiteInput) bindEndpointParams(p *EndpointParameters) { p.Bucket = in.Bucket - + p.UseS3ExpressControlEndpoint = ptr.Bool(true) } type DeleteBucketWebsiteOutput struct { @@ -123,6 +124,9 @@ func (c *Client) addOperationDeleteBucketWebsiteMiddlewares(stack *middleware.St if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = addOpDeleteBucketWebsiteValidationMiddleware(stack); err != nil { return err } diff --git a/service/s3/api_op_DeleteObject.go b/service/s3/api_op_DeleteObject.go index 09c4bcc5ef3..cd00e5998af 100644 --- a/service/s3/api_op_DeleteObject.go +++ b/service/s3/api_op_DeleteObject.go @@ -13,24 +13,69 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Removes the null version (if there is one) of an object and inserts a delete -// marker, which becomes the latest version of the object. If there isn't a null -// version, Amazon S3 does not remove any objects but will still respond that the -// command was successful. To remove a specific version, you must use the version -// Id subresource. Using this subresource permanently deletes the version. If the -// object deleted is a delete marker, Amazon S3 sets the response header, -// x-amz-delete-marker , to true. If the object you want to delete is in a bucket -// where the bucket versioning configuration is MFA Delete enabled, you must -// include the x-amz-mfa request header in the DELETE versionId request. Requests -// that include x-amz-mfa must use HTTPS. For more information about MFA Delete, -// see Using MFA Delete (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMFADelete.html) -// . To see sample requests that use versioning, see Sample Request (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html#ExampleVersionObjectDelete) -// . You can delete objects by explicitly calling DELETE Object or configure its -// lifecycle ( PutBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html) +// Removes an object from a bucket. The behavior depends on the bucket's +// versioning state: +// +// - If versioning is enabled, the operation removes the null version (if there +// is one) of an object and inserts a delete marker, which becomes the latest +// version of the object. If there isn't a null version, Amazon S3 does not remove +// any objects but will still respond that the command was successful. +// +// - If versioning is suspended or not enabled, the operation permanently +// deletes the object. +// +// - Directory buckets - S3 Versioning isn't enabled and supported for directory +// buckets. For this API operation, only the null value of the version ID is +// supported by directory buckets. You can only specify null to the versionId +// query parameter in the request. +// +// - Directory buckets - For directory buckets, you must make requests for this +// API operation to the Zonal endpoint. These endpoints support +// virtual-hosted-style requests in the format +// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . +// Path-style requests are not supported. 
For more information, see Regional and +// Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. +// +// To remove a specific version, you must use the versionId query parameter. Using +// this query parameter permanently deletes the version. If the object deleted is a +// delete marker, Amazon S3 sets the response header x-amz-delete-marker to true. +// If the object you want to delete is in a bucket where the bucket versioning +// configuration is MFA Delete enabled, you must include the x-amz-mfa request +// header in the DELETE versionId request. Requests that include x-amz-mfa must +// use HTTPS. For more information about MFA Delete, see Using MFA Delete (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMFADelete.html) +// in the Amazon S3 User Guide. To see sample requests that use versioning, see +// Sample Request (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html#ExampleVersionObjectDelete) +// . Directory buckets - MFA delete is not supported by directory buckets. You can +// delete objects by explicitly calling DELETE Object or calling ( +// PutBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html) // ) to enable Amazon S3 to remove them for you. If you want to block users or // accounts from removing or deleting objects from your bucket, you must deny them // the s3:DeleteObject , s3:DeleteObjectVersion , and s3:PutLifeCycleConfiguration -// actions. The following action is related to DeleteObject : +// actions. Directory buckets - S3 Lifecycle is not supported by directory buckets. +// Permissions +// - General purpose bucket permissions - The following permissions are required +// in your policies when your DeleteObjects request includes specific headers. +// - s3:DeleteObject - To delete an object from a bucket, you must always have +// the s3:DeleteObject permission. +// - s3:DeleteObjectVersion - To delete a specific version of an object from a +// versioning-enabled bucket, you must have the s3:DeleteObjectVersion permission. +// - Directory bucket permissions - To grant access to this API operation on a +// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// API operation for session-based authorization. Specifically, you grant the +// s3express:CreateSession permission to the directory bucket in a bucket policy +// or an IAM identity-based policy. Then, you make the CreateSession API call on +// the bucket to obtain a session token. With the session token in your request +// header, you can make API requests to this operation. After the session token +// expires, you make another CreateSession API call to generate a new session +// token for use. Amazon Web Services CLI or SDKs create session and refresh the +// session token automatically to avoid service interruptions when a session +// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// . +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket_name.s3express-az_id.region.amazonaws.com .
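Editor's note, an illustrative sketch that is not part of the generated diff, showing the versioned-delete behavior described above through the generated client; the helper and its arguments are hypothetical placeholders.

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// deleteObjectVersion permanently removes a single version of an object from a
// versioning-enabled general purpose bucket. Calling DeleteObject without a
// VersionId on such a bucket inserts a delete marker instead of removing data.
// Directory buckets accept only the null version ID.
func deleteObjectVersion(ctx context.Context, client *s3.Client, bucket, key, versionID string) error {
	_, err := client.DeleteObject(ctx, &s3.DeleteObjectInput{
		Bucket:    aws.String(bucket),
		Key:       aws.String(key),
		VersionId: aws.String(versionID),
	})
	return err
}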
The following action is +// related to DeleteObject : // - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) func (c *Client) DeleteObject(ctx context.Context, params *DeleteObjectInput, optFns ...func(*Options)) (*DeleteObjectOutput, error) { if params == nil { @@ -49,16 +94,26 @@ func (c *Client) DeleteObject(ctx context.Context, params *DeleteObjectInput, op type DeleteObjectInput struct { - // The bucket name of the bucket containing the object. When using this action - // with an access point, you must direct requests to the access point hostname. The - // access point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. When you use this action with Amazon S3 on - // Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on - // Outposts hostname takes the form + // The bucket name of the bucket containing the object. Directory buckets - When + // you use this operation with a directory bucket, you must use + // virtual-hosted-style requests in the format + // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not + // supported. Directory bucket names must be unique in the chosen Availability + // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for + // example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 ). For information about bucket + // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. Access points - When you use this action with an + // access point, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When using the access point ARN, + // you must direct requests to the access point hostname. The access point hostname + // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. Access points and Object Lambda access points are + // not supported by directory buckets. S3 on Outposts - When you use this action + // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts + // hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you // use this action with S3 on Outposts through the Amazon Web Services SDKs, you // provide the Outposts access point ARN in place of the bucket name. For more @@ -75,30 +130,34 @@ type DeleteObjectInput struct { // Indicates whether S3 Object Lock should bypass Governance-mode restrictions to // process this operation. To use this header, you must have the - // s3:BypassGovernanceRetention permission. + // s3:BypassGovernanceRetention permission. This functionality is not supported for + // directory buckets. 
BypassGovernanceRetention *bool - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string // The concatenation of the authentication device's serial number, a space, and // the value that is displayed on your authentication device. Required to // permanently delete a versioned object if versioning is configured with MFA - // delete enabled. + // delete enabled. This functionality is not supported for directory buckets. MFA *string // Confirms that the requester knows that they will be charged for the request. // Bucket owners need not specify this parameter in their requests. If either the - // source or destination Amazon S3 bucket has Requester Pays enabled, the requester - // will pay for corresponding charges to copy the object. For information about + // source or destination S3 bucket has Requester Pays enabled, the requester will + // pay for corresponding charges to copy the object. For information about // downloading objects from Requester Pays buckets, see Downloading Objects in // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. RequestPayer types.RequestPayer - // VersionId used to reference a specific version of the object. + // Version ID used to reference a specific version of the object. For directory + // buckets in this API operation, only the null value of the version ID is + // supported. VersionId *string noSmithyDocumentSerde @@ -115,15 +174,16 @@ type DeleteObjectOutput struct { // Indicates whether the specified object version that was permanently deleted was // (true) or was not (false) a delete marker before deletion. In a simple DELETE, // this header indicates whether (true) or not (false) the current version of the - // object is a delete marker. + // object is a delete marker. This functionality is not supported for directory + // buckets. DeleteMarker *bool // If present, indicates that the requester was successfully charged for the - // request. + // request. This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // Returns the version ID of the delete marker created as a result of the DELETE - // operation. + // operation. This functionality is not supported for directory buckets. VersionId *string // Metadata pertaining to the operation's result. @@ -187,6 +247,9 @@ func (c *Client) addOperationDeleteObjectMiddlewares(stack *middleware.Stack, op if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = addOpDeleteObjectValidationMiddleware(stack); err != nil { return err } diff --git a/service/s3/api_op_DeleteObjectTagging.go b/service/s3/api_op_DeleteObjectTagging.go index 1fd67d48d53..961b9459a4a 100644 --- a/service/s3/api_op_DeleteObjectTagging.go +++ b/service/s3/api_op_DeleteObjectTagging.go @@ -12,8 +12,9 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Removes the entire tag set from the specified object. 
For more information -// about managing object tags, see Object Tagging (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html) +// This operation is not supported by directory buckets. Removes the entire tag +// set from the specified object. For more information about managing object tags, +// see Object Tagging (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html) // . To use this operation, you must have permission to perform the // s3:DeleteObjectTagging action. To delete tags of a specific object version, add // the versionId query parameter in the request. You will need permission for the @@ -38,16 +39,18 @@ func (c *Client) DeleteObjectTagging(ctx context.Context, params *DeleteObjectTa type DeleteObjectTaggingInput struct { - // The bucket name containing the objects from which to remove the tags. When - // using this action with an access point, you must direct requests to the access - // point hostname. The access point hostname takes the form + // The bucket name containing the objects from which to remove the tags. Access + // points - When you use this action with an access point, you must provide the + // alias of the access point in place of the bucket name or specify the access + // point ARN. When using the access point ARN, you must direct requests to the + // access point hostname. The access point hostname takes the form // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this // action with an access point through the Amazon Web Services SDKs, you provide // the access point ARN in place of the bucket name. For more information about // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. When you use this action with Amazon S3 on - // Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on - // Outposts hostname takes the form + // in the Amazon S3 User Guide. S3 on Outposts - When you use this action with + // Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. + // The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you // use this action with S3 on Outposts through the Amazon Web Services SDKs, you // provide the Outposts access point ARN in place of the bucket name. For more @@ -62,9 +65,9 @@ type DeleteObjectTaggingInput struct { // This member is required. Key *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string // The versionId of the object that the tag-set will be removed from. 
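As an illustration of the DeleteObject behavior documented in the new doc comments above, here is a minimal, hypothetical sketch that permanently deletes one object version with the AWS SDK for Go v2. It is not part of the generated code in this patch; the bucket name, object key, and version ID are placeholders, and error handling is kept deliberately small.

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Supplying VersionId permanently deletes that version instead of inserting a
	// delete marker; directory buckets accept only a null version ID here.
	out, err := client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{
		Bucket:    aws.String("DOC-EXAMPLE-BUCKET"),
		Key:       aws.String("example-object-key"),
		VersionId: aws.String("EXAMPLE-VERSION-ID"),
	})
	if err != nil {
		log.Fatal(err)
	}

	// DeleteMarker reports whether the removed version was itself a delete marker.
	log.Printf("deleted; delete marker: %v", aws.ToBool(out.DeleteMarker))
}

Omitting VersionId on a versioning-enabled general purpose bucket would instead insert a delete marker, matching the description in the doc comments above.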
@@ -144,6 +147,9 @@ func (c *Client) addOperationDeleteObjectTaggingMiddlewares(stack *middleware.St if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = addOpDeleteObjectTaggingValidationMiddleware(stack); err != nil { return err } diff --git a/service/s3/api_op_DeleteObjects.go b/service/s3/api_op_DeleteObjects.go index 46cc2994d56..1d1fa4327e9 100644 --- a/service/s3/api_op_DeleteObjects.go +++ b/service/s3/api_op_DeleteObjects.go @@ -14,31 +14,72 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This action enables you to delete multiple objects from a bucket using a single -// HTTP request. If you know the object keys that you want to delete, then this -// action provides a suitable alternative to sending individual delete requests, -// reducing per-request overhead. The request contains a list of up to 1000 keys -// that you want to delete. In the XML, you provide the object key names, and -// optionally, version IDs if you want to delete a specific version of the object -// from a versioning-enabled bucket. For each key, Amazon S3 performs a delete -// action and returns the result of that delete, success, or failure, in the -// response. Note that if the object specified in the request is not found, Amazon -// S3 returns the result as deleted. The action supports two modes for the -// response: verbose and quiet. By default, the action uses verbose mode in which -// the response includes the result of deletion of each key in your request. In -// quiet mode the response includes only keys where the delete action encountered -// an error. For a successful deletion, the action does not return any information -// about the delete in the response body. When performing this action on an MFA -// Delete enabled bucket, that attempts to delete any versioned objects, you must -// include an MFA token. If you do not provide one, the entire request will fail, -// even if there are non-versioned objects you are trying to delete. If you provide -// an invalid token, whether there are versioned keys in the request or not, the -// entire Multi-Object Delete request will fail. For information about MFA Delete, -// see MFA Delete (https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete) -// . Finally, the Content-MD5 header is required for all Multi-Object Delete -// requests. Amazon S3 uses the header value to ensure that your request body has -// not been altered in transit. The following operations are related to -// DeleteObjects : +// This operation enables you to delete multiple objects from a bucket using a +// single HTTP request. If you know the object keys that you want to delete, then +// this operation provides a suitable alternative to sending individual delete +// requests, reducing per-request overhead. The request can contain a list of up to +// 1000 keys that you want to delete. In the XML, you provide the object key names, +// and optionally, version IDs if you want to delete a specific version of the +// object from a versioning-enabled bucket. For each key, Amazon S3 performs a +// delete operation and returns the result of that delete, success or failure, in +// the response. Note that if the object specified in the request is not found, +// Amazon S3 returns the result as deleted. +// - Directory buckets - S3 Versioning isn't enabled and supported for directory +// buckets. 
+// - Directory buckets - For directory buckets, you must make requests for this +// API operation to the Zonal endpoint. These endpoints support +// virtual-hosted-style requests in the format +// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . +// Path-style requests are not supported. For more information, see Regional and +// Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. +// +// The operation supports two modes for the response: verbose and quiet. By +// default, the operation uses verbose mode in which the response includes the +// result of deletion of each key in your request. In quiet mode the response +// includes only keys where the delete operation encountered an error. For a +// successful deletion in quiet mode, the operation does not return any +// information about the delete in the response body. When performing this action +// on an MFA Delete enabled bucket, if the request attempts to delete any versioned objects, +// you must include an MFA token. If you do not provide one, the entire request +// will fail, even if there are non-versioned objects you are trying to delete. If +// you provide an invalid token, whether there are versioned keys in the request or +// not, the entire Multi-Object Delete request will fail. For information about MFA +// Delete, see MFA Delete (https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete) +// in the Amazon S3 User Guide. Directory buckets - MFA delete is not supported by +// directory buckets. Permissions +// - General purpose bucket permissions - The following permissions are required +// in your policies when your DeleteObjects request includes specific headers. +// - s3:DeleteObject - To delete an object from a bucket, you must always specify +// the s3:DeleteObject permission. +// - s3:DeleteObjectVersion - To delete a specific version of an object from a +// versioning-enabled bucket, you must specify the s3:DeleteObjectVersion +// permission. +// - Directory bucket permissions - To grant access to this API operation on a +// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// API operation for session-based authorization. Specifically, you grant the +// s3express:CreateSession permission to the directory bucket in a bucket policy +// or an IAM identity-based policy. Then, you make the CreateSession API call on +// the bucket to obtain a session token. With the session token in your request +// header, you can make API requests to this operation. After the session token +// expires, you make another CreateSession API call to generate a new session +// token for use. The Amazon Web Services CLI or SDKs create a session and refresh the +// session token automatically to avoid service interruptions when a session +// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// . +// +// Content-MD5 request header +// - General purpose bucket - The Content-MD5 request header is required for all +// Multi-Object Delete requests. Amazon S3 uses the header value to ensure that +// your request body has not been altered in transit.
+// - Directory bucket - The Content-MD5 request header or an additional checksum +// request header (including x-amz-checksum-crc32 , x-amz-checksum-crc32c , +// x-amz-checksum-sha1 , or x-amz-checksum-sha256 ) is required for all +// Multi-Object Delete requests. +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket_name.s3express-az_id.region.amazonaws.com . The following operations are +// related to DeleteObjects : // - CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) // - UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) // - CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) @@ -61,16 +102,26 @@ func (c *Client) DeleteObjects(ctx context.Context, params *DeleteObjectsInput, type DeleteObjectsInput struct { - // The bucket name containing the objects to delete. When using this action with - // an access point, you must direct requests to the access point hostname. The - // access point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. When you use this action with Amazon S3 on - // Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on - // Outposts hostname takes the form + // The bucket name containing the objects to delete. Directory buckets - When you + // use this operation with a directory bucket, you must use virtual-hosted-style + // requests in the format Bucket_name.s3express-az_id.region.amazonaws.com . + // Path-style requests are not supported. Directory bucket names must be unique in + // the chosen Availability Zone. Bucket names must follow the format + // bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 + // ). For information about bucket naming restrictions, see Directory bucket + // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. Access points - When you use this action with an + // access point, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When using the access point ARN, + // you must direct requests to the access point hostname. The access point hostname + // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. Access points and Object Lambda access points are + // not supported by directory buckets. S3 on Outposts - When you use this action + // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts + // hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com .
When you // use this action with S3 on Outposts through the Amazon Web Services SDKs, you // provide the Outposts access point ARN in place of the bucket name. For more @@ -87,39 +138,57 @@ type DeleteObjectsInput struct { // Specifies whether you want to delete this object even if it has a // Governance-type Object Lock in place. To use this header, you must have the - // s3:BypassGovernanceRetention permission. + // s3:BypassGovernanceRetention permission. This functionality is not supported for + // directory buckets. BypassGovernanceRetention *bool - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not using - // the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the - // HTTP status code 400 Bad Request . For more information, see Checking object - // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 - // ignores any provided ChecksumAlgorithm parameter. This checksum algorithm must - // be the same for all parts and it match the checksum value supplied in the - // CreateMultipartUpload request. + // Indicates the algorithm used to create the checksum for the object when you use + // the SDK. This header will not provide any additional functionality if you don't + // use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum-algorithm or x-amz-trailer header sent. Otherwise, Amazon S3 + // fails the request with the HTTP status code 400 Bad Request . For the + // x-amz-checksum-algorithm header, replace algorithm with the supported + // algorithm from the following list: + // - CRC32 + // - CRC32C + // - SHA1 + // - SHA256 + // For more information, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. If the individual checksum value you provide + // through x-amz-checksum-algorithm doesn't match the checksum algorithm you set + // through x-amz-sdk-checksum-algorithm , Amazon S3 ignores any provided + // ChecksumAlgorithm parameter and uses the checksum algorithm that matches the + // provided value in x-amz-checksum-algorithm . If you provide an individual + // checksum, Amazon S3 ignores any provided ChecksumAlgorithm parameter. ChecksumAlgorithm types.ChecksumAlgorithm - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string // The concatenation of the authentication device's serial number, a space, and // the value that is displayed on your authentication device. Required to // permanently delete a versioned object if versioning is configured with MFA - // delete enabled. + // delete enabled. When performing the DeleteObjects operation on an MFA delete + // enabled bucket, which attempts to delete the specified versioned objects, you + // must include an MFA token. 
If you don't provide an MFA token, the entire request + // will fail, even if there are non-versioned objects that you are trying to + // delete. If you provide an invalid token, whether there are versioned object keys + // in the request or not, the entire Multi-Object Delete request will fail. For + // information about MFA Delete, see MFA Delete (https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete) + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. MFA *string // Confirms that the requester knows that they will be charged for the request. // Bucket owners need not specify this parameter in their requests. If either the - // source or destination Amazon S3 bucket has Requester Pays enabled, the requester - // will pay for corresponding charges to copy the object. For information about + // source or destination S3 bucket has Requester Pays enabled, the requester will + // pay for corresponding charges to copy the object. For information about // downloading objects from Requester Pays buckets, see Downloading Objects in // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. RequestPayer types.RequestPayer noSmithyDocumentSerde @@ -141,7 +210,7 @@ type DeleteObjectsOutput struct { Errors []types.Error // If present, indicates that the requester was successfully charged for the - // request. + // request. This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // Metadata pertaining to the operation's result. @@ -205,6 +274,9 @@ func (c *Client) addOperationDeleteObjectsMiddlewares(stack *middleware.Stack, o if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = addOpDeleteObjectsValidationMiddleware(stack); err != nil { return err } @@ -241,6 +313,9 @@ func (c *Client) addOperationDeleteObjectsMiddlewares(stack *middleware.Stack, o if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { + return err + } return nil } diff --git a/service/s3/api_op_DeletePublicAccessBlock.go b/service/s3/api_op_DeletePublicAccessBlock.go index 61ce60ef9f6..488d2a797fc 100644 --- a/service/s3/api_op_DeletePublicAccessBlock.go +++ b/service/s3/api_op_DeletePublicAccessBlock.go @@ -9,13 +9,15 @@ import ( "github.com/aws/aws-sdk-go-v2/aws/signer/v4" s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Removes the PublicAccessBlock configuration for an Amazon S3 bucket. To use -// this operation, you must have the s3:PutBucketPublicAccessBlock permission. For -// more information about permissions, see Permissions Related to Bucket -// Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// This operation is not supported by directory buckets. Removes the +// PublicAccessBlock configuration for an Amazon S3 bucket. To use this operation, +// you must have the s3:PutBucketPublicAccessBlock permission. 
For more +// information about permissions, see Permissions Related to Bucket Subresource +// Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) // and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) // . The following operations are related to DeletePublicAccessBlock : // - Using Amazon S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) @@ -44,9 +46,9 @@ type DeletePublicAccessBlockInput struct { // This member is required. Bucket *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string noSmithyDocumentSerde @@ -54,7 +56,7 @@ type DeletePublicAccessBlockInput struct { func (in *DeletePublicAccessBlockInput) bindEndpointParams(p *EndpointParameters) { p.Bucket = in.Bucket - + p.UseS3ExpressControlEndpoint = ptr.Bool(true) } type DeletePublicAccessBlockOutput struct { @@ -119,6 +121,9 @@ func (c *Client) addOperationDeletePublicAccessBlockMiddlewares(stack *middlewar if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = addOpDeletePublicAccessBlockValidationMiddleware(stack); err != nil { return err } diff --git a/service/s3/api_op_GetBucketAccelerateConfiguration.go b/service/s3/api_op_GetBucketAccelerateConfiguration.go index 671a3c8f8d4..9a90a88a0d5 100644 --- a/service/s3/api_op_GetBucketAccelerateConfiguration.go +++ b/service/s3/api_op_GetBucketAccelerateConfiguration.go @@ -10,17 +10,19 @@ import ( s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This implementation of the GET action uses the accelerate subresource to return -// the Transfer Acceleration state of a bucket, which is either Enabled or -// Suspended . Amazon S3 Transfer Acceleration is a bucket-level feature that -// enables you to perform faster data transfers to and from Amazon S3. To use this -// operation, you must have permission to perform the s3:GetAccelerateConfiguration -// action. The bucket owner has this permission by default. The bucket owner can -// grant this permission to others. For more information about permissions, see -// Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// This operation is not supported by directory buckets. This implementation of +// the GET action uses the accelerate subresource to return the Transfer +// Acceleration state of a bucket, which is either Enabled or Suspended . Amazon S3 +// Transfer Acceleration is a bucket-level feature that enables you to perform +// faster data transfers to and from Amazon S3. To use this operation, you must +// have permission to perform the s3:GetAccelerateConfiguration action. 
The bucket +// owner has this permission by default. The bucket owner can grant this permission +// to others. For more information about permissions, see Permissions Related to +// Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) // and Managing Access Permissions to your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) // in the Amazon S3 User Guide. You set the Transfer Acceleration state of an // existing bucket to Enabled or Suspended by using the @@ -54,18 +56,19 @@ type GetBucketAccelerateConfigurationInput struct { // This member is required. Bucket *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string // Confirms that the requester knows that they will be charged for the request. // Bucket owners need not specify this parameter in their requests. If either the - // source or destination Amazon S3 bucket has Requester Pays enabled, the requester - // will pay for corresponding charges to copy the object. For information about + // source or destination S3 bucket has Requester Pays enabled, the requester will + // pay for corresponding charges to copy the object. For information about // downloading objects from Requester Pays buckets, see Downloading Objects in // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. RequestPayer types.RequestPayer noSmithyDocumentSerde @@ -73,13 +76,13 @@ type GetBucketAccelerateConfigurationInput struct { func (in *GetBucketAccelerateConfigurationInput) bindEndpointParams(p *EndpointParameters) { p.Bucket = in.Bucket - + p.UseS3ExpressControlEndpoint = ptr.Bool(true) } type GetBucketAccelerateConfigurationOutput struct { // If present, indicates that the requester was successfully charged for the - // request. + // request. This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // The accelerate configuration of the bucket. @@ -146,6 +149,9 @@ func (c *Client) addOperationGetBucketAccelerateConfigurationMiddlewares(stack * if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = addOpGetBucketAccelerateConfigurationValidationMiddleware(stack); err != nil { return err } diff --git a/service/s3/api_op_GetBucketAcl.go b/service/s3/api_op_GetBucketAcl.go index 9fd961d2542..36747fc9cda 100644 --- a/service/s3/api_op_GetBucketAcl.go +++ b/service/s3/api_op_GetBucketAcl.go @@ -10,17 +10,19 @@ import ( s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This implementation of the GET action uses the acl subresource to return the -// access control list (ACL) of a bucket. 
To use GET to return the ACL of the -// bucket, you must have READ_ACP access to the bucket. If READ_ACP permission is -// granted to the anonymous user, you can return the ACL of the bucket without -// using an authorization header. To use this API operation against an access -// point, provide the alias of the access point in place of the bucket name. To use -// this API operation against an Object Lambda access point, provide the alias of -// the Object Lambda access point in place of the bucket name. If the Object Lambda +// This operation is not supported by directory buckets. This implementation of +// the GET action uses the acl subresource to return the access control list (ACL) +// of a bucket. To use GET to return the ACL of the bucket, you must have the +// READ_ACP access to the bucket. If READ_ACP permission is granted to the +// anonymous user, you can return the ACL of the bucket without using an +// authorization header. When you use this API operation with an access point, +// provide the alias of the access point in place of the bucket name. When you use +// this API operation with an Object Lambda access point, provide the alias of the +// Object Lambda access point in place of the bucket name. If the Object Lambda // access point alias in a request is not valid, the error code // InvalidAccessPointAliasError is returned. For more information about // InvalidAccessPointAliasError , see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList) @@ -49,21 +51,21 @@ func (c *Client) GetBucketAcl(ctx context.Context, params *GetBucketAclInput, op type GetBucketAclInput struct { - // Specifies the S3 bucket whose ACL is being requested. To use this API operation - // against an access point, provide the alias of the access point in place of the - // bucket name. To use this API operation against an Object Lambda access point, - // provide the alias of the Object Lambda access point in place of the bucket name. - // If the Object Lambda access point alias in a request is not valid, the error - // code InvalidAccessPointAliasError is returned. For more information about - // InvalidAccessPointAliasError , see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList) + // Specifies the S3 bucket whose ACL is being requested. When you use this API + // operation with an access point, provide the alias of the access point in place + // of the bucket name. When you use this API operation with an Object Lambda access + // point, provide the alias of the Object Lambda access point in place of the + // bucket name. If the Object Lambda access point alias in a request is not valid, + // the error code InvalidAccessPointAliasError is returned. For more information + // about InvalidAccessPointAliasError , see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList) // . // // This member is required. Bucket *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). 
ExpectedBucketOwner *string noSmithyDocumentSerde @@ -71,7 +73,7 @@ type GetBucketAclInput struct { func (in *GetBucketAclInput) bindEndpointParams(p *EndpointParameters) { p.Bucket = in.Bucket - + p.UseS3ExpressControlEndpoint = ptr.Bool(true) } type GetBucketAclOutput struct { @@ -143,6 +145,9 @@ func (c *Client) addOperationGetBucketAclMiddlewares(stack *middleware.Stack, op if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = addOpGetBucketAclValidationMiddleware(stack); err != nil { return err } diff --git a/service/s3/api_op_GetBucketAnalyticsConfiguration.go b/service/s3/api_op_GetBucketAnalyticsConfiguration.go index 1c3679e6308..0f7922ae9de 100644 --- a/service/s3/api_op_GetBucketAnalyticsConfiguration.go +++ b/service/s3/api_op_GetBucketAnalyticsConfiguration.go @@ -10,15 +10,17 @@ import ( s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This implementation of the GET action returns an analytics configuration -// (identified by the analytics configuration ID) from the bucket. To use this -// operation, you must have permissions to perform the s3:GetAnalyticsConfiguration -// action. The bucket owner has this permission by default. The bucket owner can -// grant this permission to others. For more information about permissions, see -// Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// This operation is not supported by directory buckets. This implementation of +// the GET action returns an analytics configuration (identified by the analytics +// configuration ID) from the bucket. To use this operation, you must have +// permissions to perform the s3:GetAnalyticsConfiguration action. The bucket +// owner has this permission by default. The bucket owner can grant this permission +// to others. For more information about permissions, see Permissions Related to +// Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) // and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) // in the Amazon S3 User Guide. For information about Amazon S3 analytics feature, // see Amazon S3 Analytics – Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html) @@ -54,9 +56,9 @@ type GetBucketAnalyticsConfigurationInput struct { // This member is required. Id *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). 
ExpectedBucketOwner *string noSmithyDocumentSerde @@ -64,7 +66,7 @@ type GetBucketAnalyticsConfigurationInput struct { func (in *GetBucketAnalyticsConfigurationInput) bindEndpointParams(p *EndpointParameters) { p.Bucket = in.Bucket - + p.UseS3ExpressControlEndpoint = ptr.Bool(true) } type GetBucketAnalyticsConfigurationOutput struct { @@ -133,6 +135,9 @@ func (c *Client) addOperationGetBucketAnalyticsConfigurationMiddlewares(stack *m if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = addOpGetBucketAnalyticsConfigurationValidationMiddleware(stack); err != nil { return err } diff --git a/service/s3/api_op_GetBucketCors.go b/service/s3/api_op_GetBucketCors.go index 8902ccd2f4f..33c25aa131b 100644 --- a/service/s3/api_op_GetBucketCors.go +++ b/service/s3/api_op_GetBucketCors.go @@ -10,19 +10,21 @@ import ( s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Returns the Cross-Origin Resource Sharing (CORS) configuration information set -// for the bucket. To use this operation, you must have permission to perform the -// s3:GetBucketCORS action. By default, the bucket owner has this permission and -// can grant it to others. To use this API operation against an access point, -// provide the alias of the access point in place of the bucket name. To use this -// API operation against an Object Lambda access point, provide the alias of the -// Object Lambda access point in place of the bucket name. If the Object Lambda -// access point alias in a request is not valid, the error code -// InvalidAccessPointAliasError is returned. For more information about -// InvalidAccessPointAliasError , see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList) +// This operation is not supported by directory buckets. Returns the Cross-Origin +// Resource Sharing (CORS) configuration information set for the bucket. To use +// this operation, you must have permission to perform the s3:GetBucketCORS +// action. By default, the bucket owner has this permission and can grant it to +// others. When you use this API operation with an access point, provide the alias +// of the access point in place of the bucket name. When you use this API operation +// with an Object Lambda access point, provide the alias of the Object Lambda +// access point in place of the bucket name. If the Object Lambda access point +// alias in a request is not valid, the error code InvalidAccessPointAliasError is +// returned. For more information about InvalidAccessPointAliasError , see List of +// Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList) // . For more information about CORS, see Enabling Cross-Origin Resource Sharing (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) // . The following operations are related to GetBucketCors : // - PutBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketCors.html) @@ -44,21 +46,21 @@ func (c *Client) GetBucketCors(ctx context.Context, params *GetBucketCorsInput, type GetBucketCorsInput struct { - // The bucket name for which to get the cors configuration. 
To use this API - // operation against an access point, provide the alias of the access point in - // place of the bucket name. To use this API operation against an Object Lambda - // access point, provide the alias of the Object Lambda access point in place of - // the bucket name. If the Object Lambda access point alias in a request is not - // valid, the error code InvalidAccessPointAliasError is returned. For more - // information about InvalidAccessPointAliasError , see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList) + // The bucket name for which to get the cors configuration. When you use this API + // operation with an access point, provide the alias of the access point in place + // of the bucket name. When you use this API operation with an Object Lambda access + // point, provide the alias of the Object Lambda access point in place of the + // bucket name. If the Object Lambda access point alias in a request is not valid, + // the error code InvalidAccessPointAliasError is returned. For more information + // about InvalidAccessPointAliasError , see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList) // . // // This member is required. Bucket *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string noSmithyDocumentSerde @@ -66,7 +68,7 @@ type GetBucketCorsInput struct { func (in *GetBucketCorsInput) bindEndpointParams(p *EndpointParameters) { p.Bucket = in.Bucket - + p.UseS3ExpressControlEndpoint = ptr.Bool(true) } type GetBucketCorsOutput struct { @@ -136,6 +138,9 @@ func (c *Client) addOperationGetBucketCorsMiddlewares(stack *middleware.Stack, o if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = addOpGetBucketCorsValidationMiddleware(stack); err != nil { return err } diff --git a/service/s3/api_op_GetBucketEncryption.go b/service/s3/api_op_GetBucketEncryption.go index 9bd98cbf2f2..c8be5dd0019 100644 --- a/service/s3/api_op_GetBucketEncryption.go +++ b/service/s3/api_op_GetBucketEncryption.go @@ -10,14 +10,15 @@ import ( s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Returns the default encryption configuration for an Amazon S3 bucket. By -// default, all buckets have a default encryption configuration that uses -// server-side encryption with Amazon S3 managed keys (SSE-S3). For information -// about the bucket default encryption feature, see Amazon S3 Bucket Default -// Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) +// This operation is not supported by directory buckets. Returns the default +// encryption configuration for an Amazon S3 bucket. By default, all buckets have a +// default encryption configuration that uses server-side encryption with Amazon S3 +// managed keys (SSE-S3). 
For information about the bucket default encryption +// feature, see Amazon S3 Bucket Default Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) // in the Amazon S3 User Guide. To use this operation, you must have permission to // perform the s3:GetEncryptionConfiguration action. The bucket owner has this // permission by default. The bucket owner can grant this permission to others. For @@ -50,9 +51,9 @@ type GetBucketEncryptionInput struct { // This member is required. Bucket *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string noSmithyDocumentSerde @@ -60,7 +61,7 @@ type GetBucketEncryptionInput struct { func (in *GetBucketEncryptionInput) bindEndpointParams(p *EndpointParameters) { p.Bucket = in.Bucket - + p.UseS3ExpressControlEndpoint = ptr.Bool(true) } type GetBucketEncryptionOutput struct { @@ -129,6 +130,9 @@ func (c *Client) addOperationGetBucketEncryptionMiddlewares(stack *middleware.St if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = addOpGetBucketEncryptionValidationMiddleware(stack); err != nil { return err } diff --git a/service/s3/api_op_GetBucketIntelligentTieringConfiguration.go b/service/s3/api_op_GetBucketIntelligentTieringConfiguration.go index cd74655be71..a3531f98394 100644 --- a/service/s3/api_op_GetBucketIntelligentTieringConfiguration.go +++ b/service/s3/api_op_GetBucketIntelligentTieringConfiguration.go @@ -10,10 +10,12 @@ import ( s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Gets the S3 Intelligent-Tiering configuration from the specified bucket. The S3 +// This operation is not supported by directory buckets. Gets the S3 +// Intelligent-Tiering configuration from the specified bucket. The S3 // Intelligent-Tiering storage class is designed to optimize storage costs by // automatically moving data to the most cost-effective storage access tier, // without performance impact or operational overhead. 
S3 Intelligent-Tiering @@ -64,7 +66,7 @@ type GetBucketIntelligentTieringConfigurationInput struct { func (in *GetBucketIntelligentTieringConfigurationInput) bindEndpointParams(p *EndpointParameters) { p.Bucket = in.Bucket - + p.UseS3ExpressControlEndpoint = ptr.Bool(true) } type GetBucketIntelligentTieringConfigurationOutput struct { @@ -133,6 +135,9 @@ func (c *Client) addOperationGetBucketIntelligentTieringConfigurationMiddlewares if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = addOpGetBucketIntelligentTieringConfigurationValidationMiddleware(stack); err != nil { return err } diff --git a/service/s3/api_op_GetBucketInventoryConfiguration.go b/service/s3/api_op_GetBucketInventoryConfiguration.go index 845c53e3b1f..3fe6f9868c1 100644 --- a/service/s3/api_op_GetBucketInventoryConfiguration.go +++ b/service/s3/api_op_GetBucketInventoryConfiguration.go @@ -10,13 +10,15 @@ import ( s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Returns an inventory configuration (identified by the inventory configuration -// ID) from the bucket. To use this operation, you must have permissions to perform -// the s3:GetInventoryConfiguration action. The bucket owner has this permission -// by default and can grant this permission to others. For more information about +// This operation is not supported by directory buckets. Returns an inventory +// configuration (identified by the inventory configuration ID) from the bucket. To +// use this operation, you must have permissions to perform the +// s3:GetInventoryConfiguration action. The bucket owner has this permission by +// default and can grant this permission to others. For more information about // permissions, see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) // and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) // . For information about the Amazon S3 inventory feature, see Amazon S3 Inventory (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html) @@ -51,9 +53,9 @@ type GetBucketInventoryConfigurationInput struct { // This member is required. Id *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). 
ExpectedBucketOwner *string noSmithyDocumentSerde @@ -61,7 +63,7 @@ type GetBucketInventoryConfigurationInput struct { func (in *GetBucketInventoryConfigurationInput) bindEndpointParams(p *EndpointParameters) { p.Bucket = in.Bucket - + p.UseS3ExpressControlEndpoint = ptr.Bool(true) } type GetBucketInventoryConfigurationOutput struct { @@ -130,6 +132,9 @@ func (c *Client) addOperationGetBucketInventoryConfigurationMiddlewares(stack *m if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = addOpGetBucketInventoryConfigurationValidationMiddleware(stack); err != nil { return err } diff --git a/service/s3/api_op_GetBucketLifecycleConfiguration.go b/service/s3/api_op_GetBucketLifecycleConfiguration.go index d878d134a81..4cc9eff8848 100644 --- a/service/s3/api_op_GetBucketLifecycleConfiguration.go +++ b/service/s3/api_op_GetBucketLifecycleConfiguration.go @@ -10,16 +10,17 @@ import ( s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Bucket lifecycle configuration now supports specifying a lifecycle rule using -// an object key name prefix, one or more object tags, or a combination of both. -// Accordingly, this section describes the latest API. The response describes the -// new filter element that you can use to specify a filter to select a subset of -// objects to which the rule applies. If you are using a previous version of the -// lifecycle configuration, it still works. For the earlier action, see -// GetBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html) +// This operation is not supported by directory buckets. Bucket lifecycle +// configuration now supports specifying a lifecycle rule using an object key name +// prefix, one or more object tags, or a combination of both. Accordingly, this +// section describes the latest API. The response describes the new filter element +// that you can use to specify a filter to select a subset of objects to which the +// rule applies. If you are using a previous version of the lifecycle +// configuration, it still works. For the earlier action, see GetBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html) // . Returns the lifecycle configuration information set on the bucket. For // information about lifecycle configuration, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) // . To use this operation, you must have permission to perform the @@ -60,9 +61,9 @@ type GetBucketLifecycleConfigurationInput struct { // This member is required. Bucket *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). 
ExpectedBucketOwner *string noSmithyDocumentSerde @@ -70,7 +71,7 @@ type GetBucketLifecycleConfigurationInput struct { func (in *GetBucketLifecycleConfigurationInput) bindEndpointParams(p *EndpointParameters) { p.Bucket = in.Bucket - + p.UseS3ExpressControlEndpoint = ptr.Bool(true) } type GetBucketLifecycleConfigurationOutput struct { @@ -139,6 +140,9 @@ func (c *Client) addOperationGetBucketLifecycleConfigurationMiddlewares(stack *m if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = addOpGetBucketLifecycleConfigurationValidationMiddleware(stack); err != nil { return err } diff --git a/service/s3/api_op_GetBucketLocation.go b/service/s3/api_op_GetBucketLocation.go index 851548e5e36..e94875fe5e7 100644 --- a/service/s3/api_op_GetBucketLocation.go +++ b/service/s3/api_op_GetBucketLocation.go @@ -15,17 +15,19 @@ import ( smithyxml "github.com/aws/smithy-go/encoding/xml" smithyio "github.com/aws/smithy-go/io" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" "io" ) -// Returns the Region the bucket resides in. You set the bucket's Region using the -// LocationConstraint request parameter in a CreateBucket request. For more -// information, see CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) -// . To use this API operation against an access point, provide the alias of the -// access point in place of the bucket name. To use this API operation against an -// Object Lambda access point, provide the alias of the Object Lambda access point -// in place of the bucket name. If the Object Lambda access point alias in a +// This operation is not supported by directory buckets. Returns the Region the +// bucket resides in. You set the bucket's Region using the LocationConstraint +// request parameter in a CreateBucket request. For more information, see +// CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// . When you use this API operation with an access point, provide the alias of the +// access point in place of the bucket name. When you use this API operation with +// an Object Lambda access point, provide the alias of the Object Lambda access +// point in place of the bucket name. If the Object Lambda access point alias in a // request is not valid, the error code InvalidAccessPointAliasError is returned. // For more information about InvalidAccessPointAliasError , see List of Error // Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList) @@ -52,21 +54,21 @@ func (c *Client) GetBucketLocation(ctx context.Context, params *GetBucketLocatio type GetBucketLocationInput struct { - // The name of the bucket for which to get the location. To use this API operation - // against an access point, provide the alias of the access point in place of the - // bucket name. To use this API operation against an Object Lambda access point, - // provide the alias of the Object Lambda access point in place of the bucket name. - // If the Object Lambda access point alias in a request is not valid, the error - // code InvalidAccessPointAliasError is returned. For more information about - // InvalidAccessPointAliasError , see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList) + // The name of the bucket for which to get the location. 
When you use this API + // operation with an access point, provide the alias of the access point in place + // of the bucket name. When you use this API operation with an Object Lambda access + // point, provide the alias of the Object Lambda access point in place of the + // bucket name. If the Object Lambda access point alias in a request is not valid, + // the error code InvalidAccessPointAliasError is returned. For more information + // about InvalidAccessPointAliasError , see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList) // . // // This member is required. Bucket *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string noSmithyDocumentSerde @@ -74,7 +76,7 @@ type GetBucketLocationInput struct { func (in *GetBucketLocationInput) bindEndpointParams(p *EndpointParameters) { p.Bucket = in.Bucket - + p.UseS3ExpressControlEndpoint = ptr.Bool(true) } type GetBucketLocationOutput struct { @@ -148,6 +150,9 @@ func (c *Client) addOperationGetBucketLocationMiddlewares(stack *middleware.Stac if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = addOpGetBucketLocationValidationMiddleware(stack); err != nil { return err } diff --git a/service/s3/api_op_GetBucketLogging.go b/service/s3/api_op_GetBucketLogging.go index f3f80c9d0e2..5bbca7e4cb6 100644 --- a/service/s3/api_op_GetBucketLogging.go +++ b/service/s3/api_op_GetBucketLogging.go @@ -10,12 +10,13 @@ import ( s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Returns the logging status of a bucket and the permissions users have to view -// and modify that status. The following operations are related to GetBucketLogging -// : +// This operation is not supported by directory buckets. Returns the logging +// status of a bucket and the permissions users have to view and modify that +// status. The following operations are related to GetBucketLogging : // - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) // - PutBucketLogging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLogging.html) func (c *Client) GetBucketLogging(ctx context.Context, params *GetBucketLoggingInput, optFns ...func(*Options)) (*GetBucketLoggingOutput, error) { @@ -40,9 +41,9 @@ type GetBucketLoggingInput struct { // This member is required. Bucket *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). 
ExpectedBucketOwner *string noSmithyDocumentSerde @@ -50,7 +51,7 @@ type GetBucketLoggingInput struct { func (in *GetBucketLoggingInput) bindEndpointParams(p *EndpointParameters) { p.Bucket = in.Bucket - + p.UseS3ExpressControlEndpoint = ptr.Bool(true) } type GetBucketLoggingOutput struct { @@ -121,6 +122,9 @@ func (c *Client) addOperationGetBucketLoggingMiddlewares(stack *middleware.Stack if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = addOpGetBucketLoggingValidationMiddleware(stack); err != nil { return err } diff --git a/service/s3/api_op_GetBucketMetricsConfiguration.go b/service/s3/api_op_GetBucketMetricsConfiguration.go index 28099c60d2e..913b9a04d29 100644 --- a/service/s3/api_op_GetBucketMetricsConfiguration.go +++ b/service/s3/api_op_GetBucketMetricsConfiguration.go @@ -10,16 +10,17 @@ import ( s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Gets a metrics configuration (specified by the metrics configuration ID) from -// the bucket. Note that this doesn't include the daily storage metrics. To use -// this operation, you must have permissions to perform the -// s3:GetMetricsConfiguration action. The bucket owner has this permission by -// default. The bucket owner can grant this permission to others. For more -// information about permissions, see Permissions Related to Bucket Subresource -// Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// This operation is not supported by directory buckets. Gets a metrics +// configuration (specified by the metrics configuration ID) from the bucket. Note +// that this doesn't include the daily storage metrics. To use this operation, you +// must have permissions to perform the s3:GetMetricsConfiguration action. The +// bucket owner has this permission by default. The bucket owner can grant this +// permission to others. For more information about permissions, see Permissions +// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) // and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) // . For information about CloudWatch request metrics for Amazon S3, see // Monitoring Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html) @@ -56,9 +57,9 @@ type GetBucketMetricsConfigurationInput struct { // This member is required. Id *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). 
ExpectedBucketOwner *string noSmithyDocumentSerde @@ -66,7 +67,7 @@ type GetBucketMetricsConfigurationInput struct { func (in *GetBucketMetricsConfigurationInput) bindEndpointParams(p *EndpointParameters) { p.Bucket = in.Bucket - + p.UseS3ExpressControlEndpoint = ptr.Bool(true) } type GetBucketMetricsConfigurationOutput struct { @@ -135,6 +136,9 @@ func (c *Client) addOperationGetBucketMetricsConfigurationMiddlewares(stack *mid if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = addOpGetBucketMetricsConfigurationValidationMiddleware(stack); err != nil { return err } diff --git a/service/s3/api_op_GetBucketNotificationConfiguration.go b/service/s3/api_op_GetBucketNotificationConfiguration.go index 58d9f614e08..67a35d973ae 100644 --- a/service/s3/api_op_GetBucketNotificationConfiguration.go +++ b/service/s3/api_op_GetBucketNotificationConfiguration.go @@ -10,21 +10,23 @@ import ( s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Returns the notification configuration of a bucket. If notifications are not -// enabled on the bucket, the action returns an empty NotificationConfiguration -// element. By default, you must be the bucket owner to read the notification -// configuration of a bucket. However, the bucket owner can use a bucket policy to -// grant permission to other users to read this configuration with the -// s3:GetBucketNotification permission. To use this API operation against an access -// point, provide the alias of the access point in place of the bucket name. To use -// this API operation against an Object Lambda access point, provide the alias of -// the Object Lambda access point in place of the bucket name. If the Object Lambda -// access point alias in a request is not valid, the error code -// InvalidAccessPointAliasError is returned. For more information about -// InvalidAccessPointAliasError , see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList) +// This operation is not supported by directory buckets. Returns the notification +// configuration of a bucket. If notifications are not enabled on the bucket, the +// action returns an empty NotificationConfiguration element. By default, you must +// be the bucket owner to read the notification configuration of a bucket. However, +// the bucket owner can use a bucket policy to grant permission to other users to +// read this configuration with the s3:GetBucketNotification permission. When you +// use this API operation with an access point, provide the alias of the access +// point in place of the bucket name. When you use this API operation with an +// Object Lambda access point, provide the alias of the Object Lambda access point +// in place of the bucket name. If the Object Lambda access point alias in a +// request is not valid, the error code InvalidAccessPointAliasError is returned. +// For more information about InvalidAccessPointAliasError , see List of Error +// Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList) // . 
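The ExpectedBucketOwner wording that recurs throughout these hunks can be illustrated with a minimal, hedged usage sketch. The bucket name and account ID below are placeholders, and the client setup assumes default credential and region resolution; this is not part of the generated operation code.

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Placeholder bucket and account ID. If the account ID does not match the
	// actual bucket owner, the request is expected to fail with HTTP 403
	// Forbidden (access denied), as the regenerated documentation above states.
	_, err = client.GetBucketNotificationConfiguration(context.TODO(),
		&s3.GetBucketNotificationConfigurationInput{
			Bucket:              aws.String("DOC-EXAMPLE-BUCKET"),
			ExpectedBucketOwner: aws.String("111122223333"),
		})
	if err != nil {
		log.Fatal(err)
	}
}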
For more information about setting and reading the notification configuration // on a bucket, see Setting Up Notification of Bucket Events (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) // . For more information about bucket policies, see Using Bucket Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html) @@ -47,21 +49,22 @@ func (c *Client) GetBucketNotificationConfiguration(ctx context.Context, params type GetBucketNotificationConfigurationInput struct { - // The name of the bucket for which to get the notification configuration. To use - // this API operation against an access point, provide the alias of the access - // point in place of the bucket name. To use this API operation against an Object - // Lambda access point, provide the alias of the Object Lambda access point in - // place of the bucket name. If the Object Lambda access point alias in a request - // is not valid, the error code InvalidAccessPointAliasError is returned. For more - // information about InvalidAccessPointAliasError , see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList) + // The name of the bucket for which to get the notification configuration. When + // you use this API operation with an access point, provide the alias of the access + // point in place of the bucket name. When you use this API operation with an + // Object Lambda access point, provide the alias of the Object Lambda access point + // in place of the bucket name. If the Object Lambda access point alias in a + // request is not valid, the error code InvalidAccessPointAliasError is returned. + // For more information about InvalidAccessPointAliasError , see List of Error + // Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList) // . // // This member is required. Bucket *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string noSmithyDocumentSerde @@ -69,7 +72,7 @@ type GetBucketNotificationConfigurationInput struct { func (in *GetBucketNotificationConfigurationInput) bindEndpointParams(p *EndpointParameters) { p.Bucket = in.Bucket - + p.UseS3ExpressControlEndpoint = ptr.Bool(true) } // A container for specifying the notification configuration of the bucket. 
If @@ -152,6 +155,9 @@ func (c *Client) addOperationGetBucketNotificationConfigurationMiddlewares(stack if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = addOpGetBucketNotificationConfigurationValidationMiddleware(stack); err != nil { return err } diff --git a/service/s3/api_op_GetBucketOwnershipControls.go b/service/s3/api_op_GetBucketOwnershipControls.go index ddf5c4cd24e..dca55854e96 100644 --- a/service/s3/api_op_GetBucketOwnershipControls.go +++ b/service/s3/api_op_GetBucketOwnershipControls.go @@ -10,12 +10,14 @@ import ( s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Retrieves OwnershipControls for an Amazon S3 bucket. To use this operation, you -// must have the s3:GetBucketOwnershipControls permission. For more information -// about Amazon S3 permissions, see Specifying permissions in a policy (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html) +// This operation is not supported by directory buckets. Retrieves +// OwnershipControls for an Amazon S3 bucket. To use this operation, you must have +// the s3:GetBucketOwnershipControls permission. For more information about Amazon +// S3 permissions, see Specifying permissions in a policy (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html) // . For information about Amazon S3 Object Ownership, see Using Object Ownership (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) // . The following operations are related to GetBucketOwnershipControls : // - PutBucketOwnershipControls @@ -42,9 +44,9 @@ type GetBucketOwnershipControlsInput struct { // This member is required. Bucket *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). 
ExpectedBucketOwner *string noSmithyDocumentSerde @@ -52,7 +54,7 @@ type GetBucketOwnershipControlsInput struct { func (in *GetBucketOwnershipControlsInput) bindEndpointParams(p *EndpointParameters) { p.Bucket = in.Bucket - + p.UseS3ExpressControlEndpoint = ptr.Bool(true) } type GetBucketOwnershipControlsOutput struct { @@ -122,6 +124,9 @@ func (c *Client) addOperationGetBucketOwnershipControlsMiddlewares(stack *middle if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = addOpGetBucketOwnershipControlsValidationMiddleware(stack); err != nil { return err } diff --git a/service/s3/api_op_GetBucketPolicy.go b/service/s3/api_op_GetBucketPolicy.go index a76b11cf61b..ff42b705bef 100644 --- a/service/s3/api_op_GetBucketPolicy.go +++ b/service/s3/api_op_GetBucketPolicy.go @@ -9,32 +9,50 @@ import ( "github.com/aws/aws-sdk-go-v2/aws/signer/v4" s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Returns the policy of a specified bucket. If you are using an identity other -// than the root user of the Amazon Web Services account that owns the bucket, the -// calling identity must have the GetBucketPolicy permissions on the specified -// bucket and belong to the bucket owner's account in order to use this operation. -// If you don't have GetBucketPolicy permissions, Amazon S3 returns a 403 Access -// Denied error. If you have the correct permissions, but you're not using an -// identity that belongs to the bucket owner's account, Amazon S3 returns a 405 -// Method Not Allowed error. To ensure that bucket owners don't inadvertently lock -// themselves out of their own buckets, the root principal in a bucket owner's -// Amazon Web Services account can perform the GetBucketPolicy , PutBucketPolicy , -// and DeleteBucketPolicy API actions, even if their bucket policy explicitly -// denies the root principal's access. Bucket owner root principals can only be -// blocked from performing these API actions by VPC endpoint policies and Amazon -// Web Services Organizations policies. To use this API operation against an access -// point, provide the alias of the access point in place of the bucket name. To use -// this API operation against an Object Lambda access point, provide the alias of -// the Object Lambda access point in place of the bucket name. If the Object Lambda -// access point alias in a request is not valid, the error code -// InvalidAccessPointAliasError is returned. For more information about -// InvalidAccessPointAliasError , see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList) -// . For more information about bucket policies, see Using Bucket Policies and -// User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html) -// . The following action is related to GetBucketPolicy : +// Returns the policy of a specified bucket. Directory buckets - For directory +// buckets, you must make requests for this API operation to the Regional endpoint. +// These endpoints support path-style requests in the format +// https://s3express-control.region_code.amazonaws.com/bucket-name . +// Virtual-hosted-style requests aren't supported. 
For more information, see +// Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. Permissions If you are using an identity other than +// the root user of the Amazon Web Services account that owns the bucket, the +// calling identity must both have the GetBucketPolicy permissions on the +// specified bucket and belong to the bucket owner's account in order to use this +// operation. If you don't have GetBucketPolicy permissions, Amazon S3 returns a +// 403 Access Denied error. If you have the correct permissions, but you're not +// using an identity that belongs to the bucket owner's account, Amazon S3 returns +// a 405 Method Not Allowed error. To ensure that bucket owners don't +// inadvertently lock themselves out of their own buckets, the root principal in a +// bucket owner's Amazon Web Services account can perform the GetBucketPolicy , +// PutBucketPolicy , and DeleteBucketPolicy API actions, even if their bucket +// policy explicitly denies the root principal's access. Bucket owner root +// principals can only be blocked from performing these API actions by VPC endpoint +// policies and Amazon Web Services Organizations policies. +// - General purpose bucket permissions - The s3:GetBucketPolicy permission is +// required in a policy. For more information about general purpose buckets bucket +// policies, see Using Bucket Policies and User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html) +// in the Amazon S3 User Guide. +// - Directory bucket permissions - To grant access to this API operation, you +// must have the s3express:GetBucketPolicy permission in an IAM identity-based +// policy instead of a bucket policy. Cross-account access to this API operation +// isn't supported. This operation can only be performed by the Amazon Web Services +// account that owns the resource. For more information about directory bucket +// policies and permissions, see Amazon Web Services Identity and Access +// Management (IAM) for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html) +// in the Amazon S3 User Guide. +// +// Example bucket policies General purpose buckets example bucket policies - See +// Bucket policy examples (https://docs.aws.amazon.com/AmazonS3/latest/userguide/example-bucket-policies.html) +// in the Amazon S3 User Guide. Directory bucket example bucket policies - See +// Example bucket policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html) +// in the Amazon S3 User Guide. HTTP Host header syntax Directory buckets - The +// HTTP Host header syntax is s3express-control.region.amazonaws.com . The +// following action is related to GetBucketPolicy : // - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) func (c *Client) GetBucketPolicy(ctx context.Context, params *GetBucketPolicyInput, optFns ...func(*Options)) (*GetBucketPolicyOutput, error) { if params == nil { @@ -53,21 +71,33 @@ func (c *Client) GetBucketPolicy(ctx context.Context, params *GetBucketPolicyInp type GetBucketPolicyInput struct { - // The bucket name for which to get the bucket policy. To use this API operation - // against an access point, provide the alias of the access point in place of the - // bucket name. 
To use this API operation against an Object Lambda access point, - // provide the alias of the Object Lambda access point in place of the bucket name. - // If the Object Lambda access point alias in a request is not valid, the error - // code InvalidAccessPointAliasError is returned. For more information about - // InvalidAccessPointAliasError , see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList) - // . + // The bucket name to get the bucket policy for. Directory buckets - When you use + // this operation with a directory bucket, you must use path-style requests in the + // format https://s3express-control.region_code.amazonaws.com/bucket-name . + // Virtual-hosted-style requests aren't supported. Directory bucket names must be + // unique in the chosen Availability Zone. Bucket names must also follow the format + // bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 + // ). For information about bucket naming restrictions, see Directory bucket + // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide Access points - When you use this API operation with + // an access point, provide the alias of the access point in place of the bucket + // name. Object Lambda access points - When you use this API operation with an + // Object Lambda access point, provide the alias of the Object Lambda access point + // in place of the bucket name. If the Object Lambda access point alias in a + // request is not valid, the error code InvalidAccessPointAliasError is returned. + // For more information about InvalidAccessPointAliasError , see List of Error + // Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList) + // . Access points and Object Lambda access points are not supported by directory + // buckets. // // This member is required. Bucket *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). For directory buckets, this header + // is not supported in this API operation. If you specify this header, the request + // fails with the HTTP status code 501 Not Implemented . 
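A hedged sketch of the directory-bucket case described above: the bucket name follows the documented bucket_base_name--az_id--x-s3 format and is a placeholder, and the *s3.Client is assumed to be configured as in the earlier sketch.

// getDirectoryBucketPolicy is illustrative only. For a directory bucket the
// SDK is expected to resolve the s3express-control Regional endpoint and use
// a path-style request; ExpectedBucketOwner is intentionally omitted because
// the updated documentation says it is not supported for this operation on
// directory buckets.
func getDirectoryBucketPolicy(ctx context.Context, client *s3.Client) (string, error) {
	out, err := client.GetBucketPolicy(ctx, &s3.GetBucketPolicyInput{
		Bucket: aws.String("DOC-EXAMPLE-BUCKET--usw2-az2--x-s3"), // placeholder name
	})
	if err != nil {
		return "", err
	}
	return aws.ToString(out.Policy), nil
}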
ExpectedBucketOwner *string noSmithyDocumentSerde @@ -75,7 +105,7 @@ type GetBucketPolicyInput struct { func (in *GetBucketPolicyInput) bindEndpointParams(p *EndpointParameters) { p.Bucket = in.Bucket - + p.UseS3ExpressControlEndpoint = ptr.Bool(true) } type GetBucketPolicyOutput struct { @@ -144,6 +174,9 @@ func (c *Client) addOperationGetBucketPolicyMiddlewares(stack *middleware.Stack, if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = addOpGetBucketPolicyValidationMiddleware(stack); err != nil { return err } diff --git a/service/s3/api_op_GetBucketPolicyStatus.go b/service/s3/api_op_GetBucketPolicyStatus.go index c7ed281c0b8..6acf706fc17 100644 --- a/service/s3/api_op_GetBucketPolicyStatus.go +++ b/service/s3/api_op_GetBucketPolicyStatus.go @@ -10,13 +10,15 @@ import ( s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Retrieves the policy status for an Amazon S3 bucket, indicating whether the -// bucket is public. In order to use this operation, you must have the -// s3:GetBucketPolicyStatus permission. For more information about Amazon S3 -// permissions, see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) +// This operation is not supported by directory buckets. Retrieves the policy +// status for an Amazon S3 bucket, indicating whether the bucket is public. In +// order to use this operation, you must have the s3:GetBucketPolicyStatus +// permission. For more information about Amazon S3 permissions, see Specifying +// Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) // . For more information about when Amazon S3 considers a bucket public, see The // Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status) // . The following operations are related to GetBucketPolicyStatus : @@ -46,9 +48,9 @@ type GetBucketPolicyStatusInput struct { // This member is required. Bucket *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string noSmithyDocumentSerde @@ -56,7 +58,7 @@ type GetBucketPolicyStatusInput struct { func (in *GetBucketPolicyStatusInput) bindEndpointParams(p *EndpointParameters) { p.Bucket = in.Bucket - + p.UseS3ExpressControlEndpoint = ptr.Bool(true) } type GetBucketPolicyStatusOutput struct { @@ -125,6 +127,9 @@ func (c *Client) addOperationGetBucketPolicyStatusMiddlewares(stack *middleware. 
if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = addOpGetBucketPolicyStatusValidationMiddleware(stack); err != nil { return err } diff --git a/service/s3/api_op_GetBucketReplication.go b/service/s3/api_op_GetBucketReplication.go index 54c23f7d1f3..8db927c1327 100644 --- a/service/s3/api_op_GetBucketReplication.go +++ b/service/s3/api_op_GetBucketReplication.go @@ -10,13 +10,15 @@ import ( s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Returns the replication configuration of a bucket. It can take a while to -// propagate the put or delete a replication configuration to all Amazon S3 -// systems. Therefore, a get request soon after put or delete can return a wrong -// result. For information about replication configuration, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) +// This operation is not supported by directory buckets. Returns the replication +// configuration of a bucket. It can take a while to propagate the put or delete a +// replication configuration to all Amazon S3 systems. Therefore, a get request +// soon after put or delete can return a wrong result. For information about +// replication configuration, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) // in the Amazon S3 User Guide. This action requires permissions for the // s3:GetReplicationConfiguration action. For more information about permissions, // see Using Bucket Policies and User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html) @@ -49,9 +51,9 @@ type GetBucketReplicationInput struct { // This member is required. Bucket *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). 
ExpectedBucketOwner *string noSmithyDocumentSerde @@ -59,7 +61,7 @@ type GetBucketReplicationInput struct { func (in *GetBucketReplicationInput) bindEndpointParams(p *EndpointParameters) { p.Bucket = in.Bucket - + p.UseS3ExpressControlEndpoint = ptr.Bool(true) } type GetBucketReplicationOutput struct { @@ -129,6 +131,9 @@ func (c *Client) addOperationGetBucketReplicationMiddlewares(stack *middleware.S if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = addOpGetBucketReplicationValidationMiddleware(stack); err != nil { return err } diff --git a/service/s3/api_op_GetBucketRequestPayment.go b/service/s3/api_op_GetBucketRequestPayment.go index a25d378575a..37c964509d3 100644 --- a/service/s3/api_op_GetBucketRequestPayment.go +++ b/service/s3/api_op_GetBucketRequestPayment.go @@ -10,12 +10,13 @@ import ( s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Returns the request payment configuration of a bucket. To use this version of -// the operation, you must be the bucket owner. For more information, see -// Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html) +// This operation is not supported by directory buckets. Returns the request +// payment configuration of a bucket. To use this version of the operation, you +// must be the bucket owner. For more information, see Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html) // . The following operations are related to GetBucketRequestPayment : // - ListObjects (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html) func (c *Client) GetBucketRequestPayment(ctx context.Context, params *GetBucketRequestPaymentInput, optFns ...func(*Options)) (*GetBucketRequestPaymentOutput, error) { @@ -40,9 +41,9 @@ type GetBucketRequestPaymentInput struct { // This member is required. Bucket *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). 
ExpectedBucketOwner *string noSmithyDocumentSerde @@ -50,7 +51,7 @@ type GetBucketRequestPaymentInput struct { func (in *GetBucketRequestPaymentInput) bindEndpointParams(p *EndpointParameters) { p.Bucket = in.Bucket - + p.UseS3ExpressControlEndpoint = ptr.Bool(true) } type GetBucketRequestPaymentOutput struct { @@ -119,6 +120,9 @@ func (c *Client) addOperationGetBucketRequestPaymentMiddlewares(stack *middlewar if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = addOpGetBucketRequestPaymentValidationMiddleware(stack); err != nil { return err } diff --git a/service/s3/api_op_GetBucketTagging.go b/service/s3/api_op_GetBucketTagging.go index 1bf7767d21e..4c2761be88d 100644 --- a/service/s3/api_op_GetBucketTagging.go +++ b/service/s3/api_op_GetBucketTagging.go @@ -10,13 +10,15 @@ import ( s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Returns the tag set associated with the bucket. To use this operation, you must -// have permission to perform the s3:GetBucketTagging action. By default, the -// bucket owner has this permission and can grant this permission to others. -// GetBucketTagging has the following special error: +// This operation is not supported by directory buckets. Returns the tag set +// associated with the bucket. To use this operation, you must have permission to +// perform the s3:GetBucketTagging action. By default, the bucket owner has this +// permission and can grant this permission to others. GetBucketTagging has the +// following special error: // - Error code: NoSuchTagSet // - Description: There is no tag set associated with the bucket. // @@ -45,9 +47,9 @@ type GetBucketTaggingInput struct { // This member is required. Bucket *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). 
ExpectedBucketOwner *string noSmithyDocumentSerde @@ -55,7 +57,7 @@ type GetBucketTaggingInput struct { func (in *GetBucketTaggingInput) bindEndpointParams(p *EndpointParameters) { p.Bucket = in.Bucket - + p.UseS3ExpressControlEndpoint = ptr.Bool(true) } type GetBucketTaggingOutput struct { @@ -126,6 +128,9 @@ func (c *Client) addOperationGetBucketTaggingMiddlewares(stack *middleware.Stack if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = addOpGetBucketTaggingValidationMiddleware(stack); err != nil { return err } diff --git a/service/s3/api_op_GetBucketVersioning.go b/service/s3/api_op_GetBucketVersioning.go index c56edbb4b5e..55cad6297dc 100644 --- a/service/s3/api_op_GetBucketVersioning.go +++ b/service/s3/api_op_GetBucketVersioning.go @@ -10,14 +10,16 @@ import ( s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Returns the versioning state of a bucket. To retrieve the versioning state of a -// bucket, you must be the bucket owner. This implementation also returns the MFA -// Delete status of the versioning state. If the MFA Delete status is enabled , the -// bucket owner must use an authentication device to change the versioning state of -// the bucket. The following operations are related to GetBucketVersioning : +// This operation is not supported by directory buckets. Returns the versioning +// state of a bucket. To retrieve the versioning state of a bucket, you must be the +// bucket owner. This implementation also returns the MFA Delete status of the +// versioning state. If the MFA Delete status is enabled , the bucket owner must +// use an authentication device to change the versioning state of the bucket. The +// following operations are related to GetBucketVersioning : // - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) // - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) // - DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) @@ -43,9 +45,9 @@ type GetBucketVersioningInput struct { // This member is required. Bucket *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). 
ExpectedBucketOwner *string noSmithyDocumentSerde @@ -53,7 +55,7 @@ type GetBucketVersioningInput struct { func (in *GetBucketVersioningInput) bindEndpointParams(p *EndpointParameters) { p.Bucket = in.Bucket - + p.UseS3ExpressControlEndpoint = ptr.Bool(true) } type GetBucketVersioningOutput struct { @@ -127,6 +129,9 @@ func (c *Client) addOperationGetBucketVersioningMiddlewares(stack *middleware.St if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = addOpGetBucketVersioningValidationMiddleware(stack); err != nil { return err } diff --git a/service/s3/api_op_GetBucketWebsite.go b/service/s3/api_op_GetBucketWebsite.go index 4fb220a9db4..f0ebf2b0d28 100644 --- a/service/s3/api_op_GetBucketWebsite.go +++ b/service/s3/api_op_GetBucketWebsite.go @@ -10,12 +10,14 @@ import ( s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Returns the website configuration for a bucket. To host website on Amazon S3, -// you can configure a bucket as website by adding a website configuration. For -// more information about hosting websites, see Hosting Websites on Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html) +// This operation is not supported by directory buckets. Returns the website +// configuration for a bucket. To host website on Amazon S3, you can configure a +// bucket as website by adding a website configuration. For more information about +// hosting websites, see Hosting Websites on Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html) // . This GET action requires the S3:GetBucketWebsite permission. By default, only // the bucket owner can read the bucket website configuration. However, bucket // owners can allow other users to read the website configuration by writing a @@ -45,9 +47,9 @@ type GetBucketWebsiteInput struct { // This member is required. Bucket *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string noSmithyDocumentSerde @@ -55,7 +57,7 @@ type GetBucketWebsiteInput struct { func (in *GetBucketWebsiteInput) bindEndpointParams(p *EndpointParameters) { p.Bucket = in.Bucket - + p.UseS3ExpressControlEndpoint = ptr.Bool(true) } type GetBucketWebsiteOutput struct { @@ -134,6 +136,9 @@ func (c *Client) addOperationGetBucketWebsiteMiddlewares(stack *middleware.Stack if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = addOpGetBucketWebsiteValidationMiddleware(stack); err != nil { return err } diff --git a/service/s3/api_op_GetObject.go b/service/s3/api_op_GetObject.go index 7a4ef1d411f..d2dc15c7cd7 100644 --- a/service/s3/api_op_GetObject.go +++ b/service/s3/api_op_GetObject.go @@ -16,97 +16,98 @@ import ( "time" ) -// Retrieves objects from Amazon S3. To use GET , you must have READ access to the -// object. 
If you grant READ access to the anonymous user, you can return the -// object without using an authorization header. An Amazon S3 bucket has no -// directory hierarchy such as you would find in a typical computer file system. -// You can, however, create a logical hierarchy by using object key names that -// imply a folder structure. For example, instead of naming an object sample.jpg , -// you can name it photos/2006/February/sample.jpg . To get an object from such a -// logical hierarchy, specify the full key name for the object in the GET -// operation. For a virtual hosted-style request example, if you have the object -// photos/2006/February/sample.jpg , specify the resource as -// /photos/2006/February/sample.jpg . For a path-style request example, if you have -// the object photos/2006/February/sample.jpg in the bucket named examplebucket , -// specify the resource as /examplebucket/photos/2006/February/sample.jpg . For -// more information about request types, see HTTP Host Header Bucket Specification (https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html#VirtualHostingSpecifyBucket) -// . For more information about returning the ACL of an object, see GetObjectAcl (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html) -// . If the object you are retrieving is stored in the S3 Glacier Flexible -// Retrieval or S3 Glacier Deep Archive storage class, or S3 Intelligent-Tiering -// Archive or S3 Intelligent-Tiering Deep Archive tiers, before you can retrieve -// the object you must first restore a copy using RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html) -// . Otherwise, this action returns an InvalidObjectState error. For information -// about restoring archived objects, see Restoring Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html) -// . Encryption request headers, like x-amz-server-side-encryption , should not be -// sent for GET requests if your object uses server-side encryption with Key -// Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption with -// Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with Amazon -// S3 managed encryption keys (SSE-S3). If your object does use these types of -// keys, you’ll get an HTTP 400 Bad Request error. If you encrypt an object by -// using server-side encryption with customer-provided encryption keys (SSE-C) when -// you store the object in Amazon S3, then when you GET the object, you must use -// the following headers: -// - x-amz-server-side-encryption-customer-algorithm -// - x-amz-server-side-encryption-customer-key -// - x-amz-server-side-encryption-customer-key-MD5 +// Retrieves an object from Amazon S3. In the GetObject request, specify the full +// key name for the object. General purpose buckets - Both the virtual-hosted-style +// requests and the path-style requests are supported. For a virtual hosted-style +// request example, if you have the object photos/2006/February/sample.jpg , +// specify the object key name as /photos/2006/February/sample.jpg . For a +// path-style request example, if you have the object +// photos/2006/February/sample.jpg in the bucket named examplebucket , specify the +// object key name as /examplebucket/photos/2006/February/sample.jpg . For more +// information about request types, see HTTP Host Header Bucket Specification (https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html#VirtualHostingSpecifyBucket) +// in the Amazon S3 User Guide. 
Directory buckets - Only virtual-hosted-style +// requests are supported. For a virtual hosted-style request example, if you have +// the object photos/2006/February/sample.jpg in the bucket named +// examplebucket--use1-az5--x-s3 , specify the object key name as +// /photos/2006/February/sample.jpg . Also, when you make requests to this API +// operation, your requests are sent to the Zonal endpoint. These endpoints support +// virtual-hosted-style requests in the format +// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style +// requests are not supported. For more information, see Regional and Zonal +// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. Permissions +// - General purpose bucket permissions - You must have the required permissions +// in a policy. To use GetObject , you must have the READ access to the object +// (or version). If you grant READ access to the anonymous user, the GetObject +// operation returns the object without using an authorization header. For more +// information, see Specifying permissions in a policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) +// in the Amazon S3 User Guide. If you include a versionId in your request +// header, you must have the s3:GetObjectVersion permission to access a specific +// version of an object. The s3:GetObject permission is not required in this +// scenario. If you request the current version of an object without a specific +// versionId in the request header, only the s3:GetObject permission is required. +// The s3:GetObjectVersion permission is not required in this scenario. If the +// object that you request doesn’t exist, the error that Amazon S3 returns depends +// on whether you also have the s3:ListBucket permission. +// - If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an +// HTTP status code 404 Not Found error. +// - If you don’t have the s3:ListBucket permission, Amazon S3 returns an HTTP +// status code 403 Access Denied error. +// - Directory bucket permissions - To grant access to this API operation on a +// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// API operation for session-based authorization. Specifically, you grant the +// s3express:CreateSession permission to the directory bucket in a bucket policy +// or an IAM identity-based policy. Then, you make the CreateSession API call on +// the bucket to obtain a session token. With the session token in your request +// header, you can make API requests to this operation. After the session token +// expires, you make another CreateSession API call to generate a new session +// token for use. Amazon Web Services CLI or SDKs create session and refresh the +// session token automatically to avoid service interruptions when a session +// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// . // -// For more information about SSE-C, see Server-Side Encryption (Using -// Customer-Provided Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) -// . Assuming you have the relevant permission to read object tags, the response -// also returns the x-amz-tagging-count header that provides the count of number -// of tags associated with the object. 
You can use GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html) -// to retrieve the tag set associated with an object. Permissions You need the -// relevant read object (or version) permission for this operation. For more -// information, see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) -// . If the object that you request doesn’t exist, the error that Amazon S3 returns -// depends on whether you also have the s3:ListBucket permission. If you have the -// s3:ListBucket permission on the bucket, Amazon S3 returns an HTTP status code -// 404 (Not Found) error. If you don’t have the s3:ListBucket permission, Amazon -// S3 returns an HTTP status code 403 ("access denied") error. Versioning By -// default, the GET action returns the current version of an object. To return a -// different version, use the versionId subresource. -// - If you supply a versionId , you need the s3:GetObjectVersion permission to -// access a specific version of an object. If you request a specific version, you -// do not need to have the s3:GetObject permission. If you request the current -// version without a specific version ID, only s3:GetObject permission is -// required. s3:GetObjectVersion permission won't be required. -// - If the current version of the object is a delete marker, Amazon S3 behaves -// as if the object was deleted and includes x-amz-delete-marker: true in the -// response. -// - If the specified version is a delete marker, the response returns a 405 -// (Method Not Allowed) error and the Last-Modified: timestamp response header. -// -// For more information about versioning, see PutBucketVersioning (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketVersioning.html) -// . Overriding Response Header Values There are times when you want to override -// certain response header values in a GET response. For example, you might -// override the Content-Disposition response header value in your GET request. You -// can override values for a set of response headers using the following query -// parameters. These response header values are sent only on a successful request, -// that is, when status code 200 OK is returned. The set of headers you can -// override using these parameters is a subset of the headers that Amazon S3 -// accepts when you create an object. The response headers that you can override -// for the GET response are Content-Type , Content-Language , Expires , -// Cache-Control , Content-Disposition , and Content-Encoding . To override these -// header values in the GET response, you use the following request parameters. -// You must sign the request, either using an Authorization header or a presigned -// URL, when using these parameters. They cannot be used with an unsigned -// (anonymous) request. -// - response-content-type -// - response-content-language -// - response-expires +// Storage classes If the object you are retrieving is stored in the S3 Glacier +// Flexible Retrieval storage class, the S3 Glacier Deep Archive storage class, the +// S3 Intelligent-Tiering Archive Access tier, or the S3 Intelligent-Tiering Deep +// Archive Access tier, before you can retrieve the object you must first restore a +// copy using RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html) +// . Otherwise, this operation returns an InvalidObjectState error. 
For +// information about restoring archived objects, see Restoring Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html) +// in the Amazon S3 User Guide. Directory buckets - For directory buckets, only the +// S3 Express One Zone storage class is supported to store newly created objects. +// Unsupported storage class values won't write a destination object and will +// respond with the HTTP status code 400 Bad Request . Encryption Encryption +// request headers, like x-amz-server-side-encryption , should not be sent for the +// GetObject requests, if your object uses server-side encryption with Amazon S3 +// managed encryption keys (SSE-S3), server-side encryption with Key Management +// Service (KMS) keys (SSE-KMS), or dual-layer server-side encryption with Amazon +// Web Services KMS keys (DSSE-KMS). If you include the header in your GetObject +// requests for the object that uses these types of keys, you’ll get an HTTP 400 +// Bad Request error. Overriding response header values through the request There +// are times when you want to override certain response header values of a +// GetObject response. For example, you might override the Content-Disposition +// response header value through your GetObject request. You can override values +// for a set of response headers. These modified response header values are +// included only in a successful response, that is, when the HTTP status code 200 +// OK is returned. The headers you can override using the following query +// parameters in the request are a subset of the headers that Amazon S3 accepts +// when you create an object. The response headers that you can override for the +// GetObject response are Cache-Control , Content-Disposition , Content-Encoding , +// Content-Language , Content-Type , and Expires . To override values for a set of +// response headers in the GetObject response, you can use the following query +// parameters in the request. // - response-cache-control // - response-content-disposition // - response-content-encoding +// - response-content-language +// - response-content-type +// - response-expires // -// Overriding Response Header Values If both of the If-Match and -// If-Unmodified-Since headers are present in the request as follows: If-Match -// condition evaluates to true , and; If-Unmodified-Since condition evaluates to -// false ; then, S3 returns 200 OK and the data requested. If both of the -// If-None-Match and If-Modified-Since headers are present in the request as -// follows: If-None-Match condition evaluates to false , and; If-Modified-Since -// condition evaluates to true ; then, S3 returns 304 Not Modified response code. -// For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232) -// . The following operations are related to GetObject : +// When you use these parameters, you must sign the request by using either an +// Authorization header or a presigned URL. These parameters cannot be used with an +// unsigned (anonymous) request. HTTP Host header syntax Directory buckets - The +// HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com . 
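The response-header overrides listed above map onto the Response* fields of GetObjectInput. A hedged sketch, assuming ctx, a configured client, and placeholder bucket and key names; the request must be signed or presigned, so this does not apply to anonymous requests.

out, err := client.GetObject(ctx, &s3.GetObjectInput{
	Bucket: aws.String("DOC-EXAMPLE-BUCKET"),
	Key:    aws.String("photos/2006/February/sample.jpg"),
	// Each field below corresponds to one of the response-* query parameters
	// and only affects a successful 200 OK response.
	ResponseCacheControl:       aws.String("no-cache"),
	ResponseContentDisposition: aws.String(`attachment; filename="sample.jpg"`),
	ResponseContentType:        aws.String("image/jpeg"),
})
if err != nil {
	log.Fatal(err)
}
defer out.Body.Close()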
+// The following operations are related to GetObject : // - ListBuckets (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html) // - GetObjectAcl (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html) func (c *Client) GetObject(ctx context.Context, params *GetObjectInput, optFns ...func(*Options)) (*GetObjectOutput, error) { @@ -126,18 +127,30 @@ func (c *Client) GetObject(ctx context.Context, params *GetObjectInput, optFns . type GetObjectInput struct { - // The bucket name containing the object. When using this action with an access - // point, you must direct requests to the access point hostname. The access point - // hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. When using an Object Lambda access point the + // The bucket name containing the object. Directory buckets - When you use this + // operation with a directory bucket, you must use virtual-hosted-style requests in + // the format Bucket_name.s3express-az_id.region.amazonaws.com . Path-style + // requests are not supported. Directory bucket names must be unique in the chosen + // Availability Zone. Bucket names must follow the format + // bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 + // ). For information about bucket naming restrictions, see Directory bucket + // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. Access points - When you use this action with an + // access point, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When using the access point ARN, + // you must direct requests to the access point hostname. The access point hostname + // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. Object Lambda access points - When you use this + // action with an Object Lambda access point, you must direct requests to the + // Object Lambda access point hostname. The Object Lambda access point hostname + // takes the form AccessPointName-AccountId.s3-object-lambda.Region.amazonaws.com. + // Access points and Object Lambda access points are not supported by directory + // buckets. S3 on Outposts - When you use this action with Amazon S3 on Outposts, + // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts // hostname takes the form - // AccessPointName-AccountId.s3-object-lambda.Region.amazonaws.com. When you use - // this action with Amazon S3 on Outposts, you must direct requests to the S3 on - // Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . 
When you // use this action with S3 on Outposts through the Amazon Web Services SDKs, you // provide the Outposts access point ARN in place of the bucket name. For more @@ -155,25 +168,43 @@ type GetObjectInput struct { // To retrieve the checksum, this mode must be enabled. ChecksumMode types.ChecksumMode - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string // Return the object only if its entity tag (ETag) is the same as the one - // specified; otherwise, return a 412 (precondition failed) error. + // specified in this header; otherwise, return a 412 Precondition Failed error. If + // both of the If-Match and If-Unmodified-Since headers are present in the request + // as follows: If-Match condition evaluates to true , and; If-Unmodified-Since + // condition evaluates to false ; then, S3 returns 200 OK and the data requested. + // For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232) + // . IfMatch *string // Return the object only if it has been modified since the specified time; - // otherwise, return a 304 (not modified) error. + // otherwise, return a 304 Not Modified error. If both of the If-None-Match and + // If-Modified-Since headers are present in the request as follows: If-None-Match + // condition evaluates to false , and; If-Modified-Since condition evaluates to + // true ; then, S3 returns 304 Not Modified status code. For more information + // about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232) . IfModifiedSince *time.Time // Return the object only if its entity tag (ETag) is different from the one - // specified; otherwise, return a 304 (not modified) error. + // specified in this header; otherwise, return a 304 Not Modified error. If both + // of the If-None-Match and If-Modified-Since headers are present in the request + // as follows: If-None-Match condition evaluates to false , and; If-Modified-Since + // condition evaluates to true ; then, S3 returns 304 Not Modified HTTP status + // code. For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232) + // . IfNoneMatch *string // Return the object only if it has not been modified since the specified time; - // otherwise, return a 412 (precondition failed) error. + // otherwise, return a 412 Precondition Failed error. If both of the If-Match and + // If-Unmodified-Since headers are present in the request as follows: If-Match + // condition evaluates to true , and; If-Unmodified-Since condition evaluates to + // false ; then, S3 returns 200 OK and the data requested. For more information + // about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232) . IfUnmodifiedSince *time.Time // Part number of the object being read. This is a positive integer between 1 and @@ -181,25 +212,25 @@ type GetObjectInput struct { // Useful for downloading just a part of an object. PartNumber *int32 - // Downloads the specified range bytes of an object. 
For more information about - // the HTTP Range header, see - // https://www.rfc-editor.org/rfc/rfc9110.html#name-range (https://www.rfc-editor.org/rfc/rfc9110.html#name-range) + // Downloads the specified byte range of an object. For more information about the + // HTTP Range header, see https://www.rfc-editor.org/rfc/rfc9110.html#name-range (https://www.rfc-editor.org/rfc/rfc9110.html#name-range) // . Amazon S3 doesn't support retrieving multiple ranges of data per GET request. Range *string // Confirms that the requester knows that they will be charged for the request. // Bucket owners need not specify this parameter in their requests. If either the - // source or destination Amazon S3 bucket has Requester Pays enabled, the requester - // will pay for corresponding charges to copy the object. For information about + // source or destination S3 bucket has Requester Pays enabled, the requester will + // pay for corresponding charges to copy the object. For information about // downloading objects from Requester Pays buckets, see Downloading Objects in // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. RequestPayer types.RequestPayer // Sets the Cache-Control header of the response. ResponseCacheControl *string - // Sets the Content-Disposition header of the response + // Sets the Content-Disposition header of the response. ResponseContentDisposition *string // Sets the Content-Encoding header of the response. @@ -214,23 +245,66 @@ type GetObjectInput struct { // Sets the Expires header of the response. ResponseExpires *time.Time - // Specifies the algorithm to use to when decrypting the object (for example, - // AES256). + // Specifies the algorithm to use when decrypting the object (for example, AES256 + // ). If you encrypt an object by using server-side encryption with + // customer-provided encryption keys (SSE-C) when you store the object in Amazon + // S3, then when you GET the object, you must use the following headers: + // - x-amz-server-side-encryption-customer-algorithm + // - x-amz-server-side-encryption-customer-key + // - x-amz-server-side-encryption-customer-key-MD5 + // For more information about SSE-C, see Server-Side Encryption (Using + // Customer-Provided Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. SSECustomerAlgorithm *string - // Specifies the customer-provided encryption key for Amazon S3 used to encrypt - // the data. This value is used to decrypt the object when recovering it and must - // match the one used when storing the data. The key must be appropriate for use - // with the algorithm specified in the - // x-amz-server-side-encryption-customer-algorithm header. + // Specifies the customer-provided encryption key that you originally provided for + // Amazon S3 to encrypt the data before storing it. This value is used to decrypt + // the object when recovering it and must match the one used when storing the data. + // The key must be appropriate for use with the algorithm specified in the + // x-amz-server-side-encryption-customer-algorithm header. 
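The conditional (If-*) and Range parameters documented in these hunks can be combined in one request. A hedged sketch follows: the helper name, bucket, key, and ETag value are illustrative, the client is the one constructed in the earlier example, and it assumes a 304 Not Modified surfaces as an *awshttp.ResponseError, which is the usual error-wrapping behavior in aws-sdk-go-v2.

```go
package main

import (
	"context"
	"errors"
	"net/http"

	"github.com/aws/aws-sdk-go-v2/aws"
	awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// getFirstKiBIfChanged downloads the first KiB of an object only when its ETag
// differs from prevETag. It reports changed=false when S3 answers 304 Not Modified.
func getFirstKiBIfChanged(ctx context.Context, client *s3.Client, prevETag string) (out *s3.GetObjectOutput, changed bool, err error) {
	out, err = client.GetObject(ctx, &s3.GetObjectInput{
		Bucket:      aws.String("DOC-EXAMPLE-BUCKET"),
		Key:         aws.String("example-object"),
		IfNoneMatch: aws.String(prevETag),       // conditional request, per RFC 7232
		Range:       aws.String("bytes=0-1023"), // first 1,024 bytes only
	})
	if err != nil {
		var respErr *awshttp.ResponseError
		if errors.As(err, &respErr) && respErr.HTTPStatusCode() == http.StatusNotModified {
			return nil, false, nil // cached copy is still current
		}
		return nil, false, err
	}
	return out, true, nil
}
```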
If you encrypt an object + // by using server-side encryption with customer-provided encryption keys (SSE-C) + // when you store the object in Amazon S3, then when you GET the object, you must + // use the following headers: + // - x-amz-server-side-encryption-customer-algorithm + // - x-amz-server-side-encryption-customer-key + // - x-amz-server-side-encryption-customer-key-MD5 + // For more information about SSE-C, see Server-Side Encryption (Using + // Customer-Provided Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. SSECustomerKey *string - // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure that the - // encryption key was transmitted without error. + // Specifies the 128-bit MD5 digest of the customer-provided encryption key + // according to RFC 1321. Amazon S3 uses this header for a message integrity check + // to ensure that the encryption key was transmitted without error. If you encrypt + // an object by using server-side encryption with customer-provided encryption keys + // (SSE-C) when you store the object in Amazon S3, then when you GET the object, + // you must use the following headers: + // - x-amz-server-side-encryption-customer-algorithm + // - x-amz-server-side-encryption-customer-key + // - x-amz-server-side-encryption-customer-key-MD5 + // For more information about SSE-C, see Server-Side Encryption (Using + // Customer-Provided Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. SSECustomerKeyMD5 *string - // VersionId used to reference a specific version of the object. + // Version ID used to reference a specific version of the object. By default, the + // GetObject operation returns the current version of an object. To return a + // different version, use the versionId subresource. + // - If you include a versionId in your request header, you must have the + // s3:GetObjectVersion permission to access a specific version of an object. The + // s3:GetObject permission is not required in this scenario. + // - If you request the current version of an object without a specific versionId + // in the request header, only the s3:GetObject permission is required. The + // s3:GetObjectVersion permission is not required in this scenario. + // - Directory buckets - S3 Versioning isn't enabled and supported for directory + // buckets. For this API operation, only the null value of the version ID is + // supported by directory buckets. You can only specify null to the versionId + // query parameter in the request. + // For more information about versioning, see PutBucketVersioning (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketVersioning.html) + // . VersionId *string noSmithyDocumentSerde @@ -244,51 +318,48 @@ func (in *GetObjectInput) bindEndpointParams(p *EndpointParameters) { type GetObjectOutput struct { - // Indicates that a range of bytes was specified. + // Indicates that a range of bytes was specified in the request. AcceptRanges *string // Object data. Body io.ReadCloser // Indicates whether the object uses an S3 Bucket Key for server-side encryption - // with Key Management Service (KMS) keys (SSE-KMS). 
+ // with Key Management Service (KMS) keys (SSE-KMS). This functionality is not + // supported for directory buckets. BucketKeyEnabled *bool // Specifies caching behavior along the request/reply chain. CacheControl *string // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this may not - // be a checksum value of the object. For more information about how checksums are - // calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. For more information, see Checking + // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. ChecksumCRC32 *string // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this may not - // be a checksum value of the object. For more information about how checksums are - // calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. For more information, see Checking + // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. ChecksumCRC32C *string // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this may not - // be a checksum value of the object. For more information about how checksums are - // calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. For more information, see Checking + // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. ChecksumSHA1 *string // The base64-encoded, 256-bit SHA-256 digest of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this may not - // be a checksum value of the object. For more information about how checksums are - // calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. For more information, see Checking + // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. ChecksumSHA256 *string // Specifies presentational information for the object. ContentDisposition *string - // Specifies what content encodings have been applied to the object and thus what + // Indicates what content encodings have been applied to the object and thus what // decoding mechanisms must be applied to obtain the media-type referenced by the // Content-Type header field. ContentEncoding *string @@ -305,24 +376,34 @@ type GetObjectOutput struct { // A standard MIME type describing the format of the object data. 
ContentType *string - // Specifies whether the object retrieved was (true) or was not (false) a Delete + // Indicates whether the object retrieved was (true) or was not (false) a Delete // Marker. If false, this response header does not appear in the response. + // - If the current version of the object is a delete marker, Amazon S3 behaves + // as if the object was deleted and includes x-amz-delete-marker: true in the + // response. + // - If the specified version in the request is a delete marker, the response + // returns a 405 Method Not Allowed error and the Last-Modified: timestamp + // response header. DeleteMarker *bool // An entity tag (ETag) is an opaque identifier assigned by a web server to a // specific version of a resource found at a URL. ETag *string - // If the object expiration is configured (see PUT Bucket lifecycle), the response - // includes this header. It includes the expiry-date and rule-id key-value pairs - // providing object expiration information. The value of the rule-id is - // URL-encoded. + // If the object expiration is configured (see PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html) + // ), the response includes this header. It includes the expiry-date and rule-id + // key-value pairs providing object expiration information. The value of the + // rule-id is URL-encoded. This functionality is not supported for directory + // buckets. Expiration *string // The date and time at which the object is no longer cacheable. Expires *time.Time - // Date and time when the object was last modified. + // Date and time when the object was last modified. General purpose buckets - When + // you specify a versionId of the object in your request, if the specified version + // in the request is a delete marker, the response returns a 405 Method Not Allowed + // error and the Last-Modified: timestamp response header. LastModified *time.Time // A map of metadata to store with the object in S3. @@ -330,20 +411,24 @@ type GetObjectOutput struct { // Map keys will be normalized to lower-case. Metadata map[string]string - // This is set to the number of metadata entries not returned in x-amz-meta - // headers. This can happen if you create metadata using an API like SOAP that - // supports more flexible metadata than the REST API. For example, using SOAP, you - // can create metadata whose values are not legal HTTP headers. + // This is set to the number of metadata entries not returned in the headers that + // are prefixed with x-amz-meta- . This can happen if you create metadata using an + // API like SOAP that supports more flexible metadata than the REST API. For + // example, using SOAP, you can create metadata whose values are not legal HTTP + // headers. This functionality is not supported for directory buckets. MissingMeta *int32 // Indicates whether this object has an active legal hold. This field is only - // returned if you have permission to view an object's legal hold status. + // returned if you have permission to view an object's legal hold status. This + // functionality is not supported for directory buckets. ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus - // The Object Lock mode currently in place for this object. + // The Object Lock mode that's currently in place for this object. This + // functionality is not supported for directory buckets. ObjectLockMode types.ObjectLockMode - // The date and time when this object's Object Lock will expire. 
+ // The date and time when this object's Object Lock will expire. This + // functionality is not supported for directory buckets. ObjectLockRetainUntilDate *time.Time // The count of parts this object has. This value is only returned if you specify @@ -351,48 +436,63 @@ type GetObjectOutput struct { PartsCount *int32 // Amazon S3 can return this if your request involves a bucket that is either a - // source or destination in a replication rule. + // source or destination in a replication rule. This functionality is not supported + // for directory buckets. ReplicationStatus types.ReplicationStatus // If present, indicates that the requester was successfully charged for the - // request. + // request. This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // Provides information about object restoration action and expiration time of the - // restored object copy. + // restored object copy. This functionality is not supported for directory buckets. + // Only the S3 Express One Zone storage class is supported by directory buckets to + // store objects. Restore *string // If server-side encryption with a customer-provided encryption key was - // requested, the response will include this header confirming the encryption - // algorithm used. + // requested, the response will include this header to confirm the encryption + // algorithm that's used. This functionality is not supported for directory + // buckets. SSECustomerAlgorithm *string // If server-side encryption with a customer-provided encryption key was - // requested, the response will include this header to provide round-trip message - // integrity verification of the customer-provided encryption key. + // requested, the response will include this header to provide the round-trip + // message integrity verification of the customer-provided encryption key. This + // functionality is not supported for directory buckets. SSECustomerKeyMD5 *string - // If present, specifies the ID of the Key Management Service (KMS) symmetric - // encryption customer managed key that was used for the object. + // If present, indicates the ID of the Key Management Service (KMS) symmetric + // encryption customer managed key that was used for the object. This functionality + // is not supported for directory buckets. SSEKMSKeyId *string - // The server-side encryption algorithm used when storing this object in Amazon S3 - // (for example, AES256 , aws:kms , aws:kms:dsse ). + // The server-side encryption algorithm used when you store this object in Amazon + // S3 (for example, AES256 , aws:kms , aws:kms:dsse ). For directory buckets, only + // server-side encryption with Amazon S3 managed keys (SSE-S3) ( AES256 ) is + // supported. ServerSideEncryption types.ServerSideEncryption // Provides storage class information of the object. Amazon S3 returns this header - // for all objects except for S3 Standard storage class objects. + // for all objects except for S3 Standard storage class objects. Directory buckets + // - Only the S3 Express One Zone storage class is supported by directory buckets + // to store objects. StorageClass types.StorageClass - // The number of tags, if any, on the object. + // The number of tags, if any, on the object, when you have the relevant + // permission to read object tags. You can use GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html) + // to retrieve the tag set associated with an object. 
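The output fields described in the surrounding hunks (checksums, storage class, version ID) can be read directly from GetObjectOutput. A minimal sketch, reusing the hypothetical client and placeholder names from the first example; enabling ChecksumMode on the input is what makes the checksum fields populate, and only objects uploaded with a checksum return one.

```go
package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// describeObjectMetadata prints a few of the response fields documented above.
func describeObjectMetadata(ctx context.Context, client *s3.Client) error {
	out, err := client.GetObject(ctx, &s3.GetObjectInput{
		Bucket:       aws.String("DOC-EXAMPLE-BUCKET"),
		Key:          aws.String("example-object"),
		ChecksumMode: types.ChecksumModeEnabled, // required for checksum fields to be returned
	})
	if err != nil {
		return err
	}
	defer out.Body.Close()

	fmt.Println("ETag:          ", aws.ToString(out.ETag))
	fmt.Println("CRC32 checksum:", aws.ToString(out.ChecksumCRC32)) // empty if none was stored
	fmt.Println("Storage class: ", out.StorageClass)                // empty means S3 Standard
	fmt.Println("Version ID:    ", aws.ToString(out.VersionId))
	return nil
}
```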
This functionality is not + // supported for directory buckets. TagCount *int32 - // Version of the object. + // Version ID of the object. This functionality is not supported for directory + // buckets. VersionId *string // If the bucket is configured as a website, redirects requests for this object to // another object in the same bucket or to an external URL. Amazon S3 stores the - // value of this header in the object metadata. + // value of this header in the object metadata. This functionality is not supported + // for directory buckets. WebsiteRedirectLocation *string // Metadata pertaining to the operation's result. @@ -453,6 +553,9 @@ func (c *Client) addOperationGetObjectMiddlewares(stack *middleware.Stack, optio if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = addOpGetObjectValidationMiddleware(stack); err != nil { return err } diff --git a/service/s3/api_op_GetObjectAcl.go b/service/s3/api_op_GetObjectAcl.go index 4f3bf8095d9..3b2a16872ac 100644 --- a/service/s3/api_op_GetObjectAcl.go +++ b/service/s3/api_op_GetObjectAcl.go @@ -13,10 +13,11 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Returns the access control list (ACL) of an object. To use this operation, you -// must have s3:GetObjectAcl permissions or READ_ACP access to the object. For -// more information, see Mapping of ACL permissions and access policy permissions (https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#acl-access-policy-permission-mapping) -// in the Amazon S3 User Guide This action is not supported by Amazon S3 on +// This operation is not supported by directory buckets. Returns the access +// control list (ACL) of an object. To use this operation, you must have +// s3:GetObjectAcl permissions or READ_ACP access to the object. For more +// information, see Mapping of ACL permissions and access policy permissions (https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#acl-access-policy-permission-mapping) +// in the Amazon S3 User Guide This functionality is not supported for Amazon S3 on // Outposts. By default, GET returns ACL information about the current version of // an object. To return ACL information about a different version, use the // versionId subresource. If your bucket uses the bucket owner enforced setting for @@ -48,7 +49,9 @@ func (c *Client) GetObjectAcl(ctx context.Context, params *GetObjectAclInput, op type GetObjectAclInput struct { // The bucket name that contains the object for which to get the ACL information. - // When using this action with an access point, you must direct requests to the + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the access + // point ARN. When using the access point ARN, you must direct requests to the // access point hostname. The access point hostname takes the form // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this // action with an access point through the Amazon Web Services SDKs, you provide @@ -64,21 +67,23 @@ type GetObjectAclInput struct { // This member is required. Key *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. 
If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string // Confirms that the requester knows that they will be charged for the request. // Bucket owners need not specify this parameter in their requests. If either the - // source or destination Amazon S3 bucket has Requester Pays enabled, the requester - // will pay for corresponding charges to copy the object. For information about + // source or destination S3 bucket has Requester Pays enabled, the requester will + // pay for corresponding charges to copy the object. For information about // downloading objects from Requester Pays buckets, see Downloading Objects in // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. RequestPayer types.RequestPayer - // VersionId used to reference a specific version of the object. + // Version ID used to reference a specific version of the object. This + // functionality is not supported for directory buckets. VersionId *string noSmithyDocumentSerde @@ -99,7 +104,7 @@ type GetObjectAclOutput struct { Owner *types.Owner // If present, indicates that the requester was successfully charged for the - // request. + // request. This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // Metadata pertaining to the operation's result. @@ -163,6 +168,9 @@ func (c *Client) addOperationGetObjectAclMiddlewares(stack *middleware.Stack, op if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = addOpGetObjectAclValidationMiddleware(stack); err != nil { return err } diff --git a/service/s3/api_op_GetObjectAttributes.go b/service/s3/api_op_GetObjectAttributes.go index d8a4555f61f..ed53ae7b31e 100644 --- a/service/s3/api_op_GetObjectAttributes.go +++ b/service/s3/api_op_GetObjectAttributes.go @@ -15,57 +15,87 @@ import ( ) // Retrieves all the metadata from an object without returning the object itself. -// This action is useful if you're interested only in an object's metadata. To use -// GetObjectAttributes , you must have READ access to the object. +// This operation is useful if you're interested only in an object's metadata. // GetObjectAttributes combines the functionality of HeadObject and ListParts . All // of the data returned with each of those individual calls can be returned with a -// single call to GetObjectAttributes . If you encrypt an object by using -// server-side encryption with customer-provided encryption keys (SSE-C) when you -// store the object in Amazon S3, then when you retrieve the metadata from the -// object, you must use the following headers: +// single call to GetObjectAttributes . Directory buckets - For directory buckets, +// you must make requests for this API operation to the Zonal endpoint. These +// endpoints support virtual-hosted-style requests in the format +// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style +// requests are not supported. For more information, see Regional and Zonal +// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. 
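Before the GetObjectAttributes permission details below, here is a hedged sketch of the GetObjectAcl call covered in the preceding hunks. The helper name, bucket, and key are placeholders, and the client is the same illustrative one used earlier.

```go
package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// printObjectACL lists each grantee and permission on an object's ACL.
func printObjectACL(ctx context.Context, client *s3.Client) error {
	out, err := client.GetObjectAcl(ctx, &s3.GetObjectAclInput{
		Bucket: aws.String("DOC-EXAMPLE-BUCKET"),
		Key:    aws.String("example-object"),
	})
	if err != nil {
		return err
	}
	if out.Owner != nil {
		fmt.Println("owner:", aws.ToString(out.Owner.DisplayName))
	}
	for _, grant := range out.Grants {
		// A grantee may be identified by canonical ID, group URI, or email address.
		fmt.Printf("grantee %v has %s\n", grant.Grantee, grant.Permission)
	}
	return nil
}
```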
Permissions +// - General purpose bucket permissions - To use GetObjectAttributes , you must +// have READ access to the object. The permissions that you need to use this +// operation with depend on whether the bucket is versioned. If the bucket is +// versioned, you need both the s3:GetObjectVersion and +// s3:GetObjectVersionAttributes permissions for this operation. If the bucket is +// not versioned, you need the s3:GetObject and s3:GetObjectAttributes +// permissions. For more information, see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) +// in the Amazon S3 User Guide. If the object that you request does not exist, the +// error Amazon S3 returns depends on whether you also have the s3:ListBucket +// permission. +// - If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an +// HTTP status code 404 Not Found ("no such key") error. +// - If you don't have the s3:ListBucket permission, Amazon S3 returns an HTTP +// status code 403 Forbidden ("access denied") error. +// - Directory bucket permissions - To grant access to this API operation on a +// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// API operation for session-based authorization. Specifically, you grant the +// s3express:CreateSession permission to the directory bucket in a bucket policy +// or an IAM identity-based policy. Then, you make the CreateSession API call on +// the bucket to obtain a session token. With the session token in your request +// header, you can make API requests to this operation. After the session token +// expires, you make another CreateSession API call to generate a new session +// token for use. Amazon Web Services CLI or SDKs create session and refresh the +// session token automatically to avoid service interruptions when a session +// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// . +// +// Encryption Encryption request headers, like x-amz-server-side-encryption , +// should not be sent for HEAD requests if your object uses server-side encryption +// with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side +// encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side +// encryption with Amazon S3 managed encryption keys (SSE-S3). The +// x-amz-server-side-encryption header is used when you PUT an object to S3 and +// want to specify the encryption method. If you include this header in a GET +// request for an object that uses these types of keys, you’ll get an HTTP 400 Bad +// Request error. It's because the encryption method can't be changed when you +// retrieve the object. If you encrypt an object by using server-side encryption +// with customer-provided encryption keys (SSE-C) when you store the object in +// Amazon S3, then when you retrieve the metadata from the object, you must use the +// following headers to provide the encryption key for the server to be able to +// retrieve the object's metadata. 
The headers are: // - x-amz-server-side-encryption-customer-algorithm // - x-amz-server-side-encryption-customer-key // - x-amz-server-side-encryption-customer-key-MD5 // // For more information about SSE-C, see Server-Side Encryption (Using // Customer-Provided Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) -// in the Amazon S3 User Guide. -// - Encryption request headers, such as x-amz-server-side-encryption , should -// not be sent for GET requests if your object uses server-side encryption with -// Amazon Web Services KMS keys stored in Amazon Web Services Key Management -// Service (SSE-KMS) or server-side encryption with Amazon S3 managed keys -// (SSE-S3). If your object does use these types of keys, you'll get an HTTP 400 -// Bad Request error. -// - The last modified property in this case is the creation date of the object. -// -// Consider the following when using request headers: +// in the Amazon S3 User Guide. Directory bucket permissions - For directory +// buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) ( +// AES256 ) is supported. Versioning Directory buckets - S3 Versioning isn't +// enabled and supported for directory buckets. For this API operation, only the +// null value of the version ID is supported by directory buckets. You can only +// specify null to the versionId query parameter in the request. Conditional +// request headers Consider the following when using request headers: // - If both of the If-Match and If-Unmodified-Since headers are present in the // request as follows, then Amazon S3 returns the HTTP status code 200 OK and the // data requested: // - If-Match condition evaluates to true . -// - If-Unmodified-Since condition evaluates to false . +// - If-Unmodified-Since condition evaluates to false . For more information +// about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232) +// . // - If both of the If-None-Match and If-Modified-Since headers are present in // the request as follows, then Amazon S3 returns the HTTP status code 304 Not // Modified : // - If-None-Match condition evaluates to false . -// - If-Modified-Since condition evaluates to true . -// -// For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232) -// . Permissions The permissions that you need to use this operation depend on -// whether the bucket is versioned. If the bucket is versioned, you need both the -// s3:GetObjectVersion and s3:GetObjectVersionAttributes permissions for this -// operation. If the bucket is not versioned, you need the s3:GetObject and -// s3:GetObjectAttributes permissions. For more information, see Specifying -// Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) -// in the Amazon S3 User Guide. If the object that you request does not exist, the -// error Amazon S3 returns depends on whether you also have the s3:ListBucket -// permission. -// - If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an -// HTTP status code 404 Not Found ("no such key") error. -// - If you don't have the s3:ListBucket permission, Amazon S3 returns an HTTP -// status code 403 Forbidden ("access denied") error. +// - If-Modified-Since condition evaluates to true . For more information about +// conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232) . 
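A minimal sketch of calling GetObjectAttributes as documented above, requesting a few attributes by name. The bucket, key, and client are placeholders carried over from the earlier examples; the attribute constants are the enum values defined in the service's types package.

```go
package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// getObjectAttributes asks S3 for selected metadata without downloading the object body.
func getObjectAttributes(ctx context.Context, client *s3.Client) error {
	out, err := client.GetObjectAttributes(ctx, &s3.GetObjectAttributesInput{
		Bucket: aws.String("DOC-EXAMPLE-BUCKET"),
		Key:    aws.String("example-object"),
		ObjectAttributes: []types.ObjectAttributes{
			types.ObjectAttributesEtag,
			types.ObjectAttributesObjectSize,
			types.ObjectAttributesStorageClass,
		},
	})
	if err != nil {
		return err
	}
	fmt.Println("ETag:         ", aws.ToString(out.ETag))
	fmt.Println("Size (bytes): ", aws.ToInt64(out.ObjectSize))
	fmt.Println("Storage class:", out.StorageClass)
	return nil
}
```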
// -// The following actions are related to GetObjectAttributes : +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket_name.s3express-az_id.region.amazonaws.com . The following actions are +// related to GetObjectAttributes : // - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) // - GetObjectAcl (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html) // - GetObjectLegalHold (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectLegalHold.html) @@ -91,16 +121,26 @@ func (c *Client) GetObjectAttributes(ctx context.Context, params *GetObjectAttri type GetObjectAttributesInput struct { - // The name of the bucket that contains the object. When using this action with an - // access point, you must direct requests to the access point hostname. The access - // point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. When you use this action with Amazon S3 on - // Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on - // Outposts hostname takes the form + // The name of the bucket that contains the object. Directory buckets - When you + // use this operation with a directory bucket, you must use virtual-hosted-style + // requests in the format Bucket_name.s3express-az_id.region.amazonaws.com . + // Path-style requests are not supported. Directory bucket names must be unique in + // the chosen Availability Zone. Bucket names must follow the format + // bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 + // ). For information about bucket naming restrictions, see Directory bucket + // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. Access points - When you use this action with an + // access point, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When using the access point ARN, + // you must direct requests to the access point hostname. The access point hostname + // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. Access points and Object Lambda access points are + // not supported by directory buckets. S3 on Outposts - When you use this action + // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts + // hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you // use this action with S3 on Outposts through the Amazon Web Services SDKs, you // provide the Outposts access point ARN in place of the bucket name. For more @@ -121,9 +161,9 @@ type GetObjectAttributesInput struct { // This member is required. 
ObjectAttributes []types.ObjectAttributes - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string // Sets the maximum number of parts to return. @@ -135,29 +175,37 @@ type GetObjectAttributesInput struct { // Confirms that the requester knows that they will be charged for the request. // Bucket owners need not specify this parameter in their requests. If either the - // source or destination Amazon S3 bucket has Requester Pays enabled, the requester - // will pay for corresponding charges to copy the object. For information about + // source or destination S3 bucket has Requester Pays enabled, the requester will + // pay for corresponding charges to copy the object. For information about // downloading objects from Requester Pays buckets, see Downloading Objects in // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. RequestPayer types.RequestPayer - // Specifies the algorithm to use when encrypting the object (for example, AES256). + // Specifies the algorithm to use when encrypting the object (for example, + // AES256). This functionality is not supported for directory buckets. SSECustomerAlgorithm *string // Specifies the customer-provided encryption key for Amazon S3 to use in // encrypting data. This value is used to store the object and then it is // discarded; Amazon S3 does not store the encryption key. The key must be // appropriate for use with the algorithm specified in the - // x-amz-server-side-encryption-customer-algorithm header. + // x-amz-server-side-encryption-customer-algorithm header. This functionality is + // not supported for directory buckets. SSECustomerKey *string // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure that the - // encryption key was transmitted without error. + // encryption key was transmitted without error. This functionality is not + // supported for directory buckets. SSECustomerKeyMD5 *string - // The version ID used to reference a specific version of the object. + // The version ID used to reference a specific version of the object. S3 + // Versioning isn't enabled and supported for directory buckets. For this API + // operation, only the null value of the version ID is supported by directory + // buckets. You can only specify null to the versionId query parameter in the + // request. VersionId *string noSmithyDocumentSerde @@ -175,6 +223,7 @@ type GetObjectAttributesOutput struct { // Specifies whether the object retrieved was ( true ) or was not ( false ) a // delete marker. If false , this response header does not appear in the response. + // This functionality is not supported for directory buckets. DeleteMarker *bool // An ETag is an opaque identifier assigned by a web server to a specific version @@ -191,16 +240,18 @@ type GetObjectAttributesOutput struct { ObjectSize *int64 // If present, indicates that the requester was successfully charged for the - // request. + // request. 
This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // Provides the storage class information of the object. Amazon S3 returns this // header for all objects except for S3 Standard storage class objects. For more // information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) - // . + // . Directory buckets - Only the S3 Express One Zone storage class is supported by + // directory buckets to store objects. StorageClass types.StorageClass - // The version ID of the object. + // The version ID of the object. This functionality is not supported for directory + // buckets. VersionId *string // Metadata pertaining to the operation's result. @@ -264,6 +315,9 @@ func (c *Client) addOperationGetObjectAttributesMiddlewares(stack *middleware.St if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = addOpGetObjectAttributesValidationMiddleware(stack); err != nil { return err } diff --git a/service/s3/api_op_GetObjectLegalHold.go b/service/s3/api_op_GetObjectLegalHold.go index fb244d1b462..3f7af220230 100644 --- a/service/s3/api_op_GetObjectLegalHold.go +++ b/service/s3/api_op_GetObjectLegalHold.go @@ -13,10 +13,10 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Gets an object's current legal hold status. For more information, see Locking -// Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) . -// This action is not supported by Amazon S3 on Outposts. The following action is -// related to GetObjectLegalHold : +// This operation is not supported by directory buckets. Gets an object's current +// legal hold status. For more information, see Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) +// . This functionality is not supported for Amazon S3 on Outposts. The following +// action is related to GetObjectLegalHold : // - GetObjectAttributes (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) func (c *Client) GetObjectLegalHold(ctx context.Context, params *GetObjectLegalHoldInput, optFns ...func(*Options)) (*GetObjectLegalHoldOutput, error) { if params == nil { @@ -36,8 +36,10 @@ func (c *Client) GetObjectLegalHold(ctx context.Context, params *GetObjectLegalH type GetObjectLegalHoldInput struct { // The bucket name containing the object whose legal hold status you want to - // retrieve. When using this action with an access point, you must direct requests - // to the access point hostname. The access point hostname takes the form + // retrieve. Access points - When you use this action with an access point, you + // must provide the alias of the access point in place of the bucket name or + // specify the access point ARN. When using the access point ARN, you must direct + // requests to the access point hostname. The access point hostname takes the form // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this // action with an access point through the Amazon Web Services SDKs, you provide // the access point ARN in place of the bucket name. For more information about @@ -52,18 +54,19 @@ type GetObjectLegalHoldInput struct { // This member is required. Key *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). 
+ // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string // Confirms that the requester knows that they will be charged for the request. // Bucket owners need not specify this parameter in their requests. If either the - // source or destination Amazon S3 bucket has Requester Pays enabled, the requester - // will pay for corresponding charges to copy the object. For information about + // source or destination S3 bucket has Requester Pays enabled, the requester will + // pay for corresponding charges to copy the object. For information about // downloading objects from Requester Pays buckets, see Downloading Objects in // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. RequestPayer types.RequestPayer // The version ID of the object whose legal hold status you want to retrieve. @@ -143,6 +146,9 @@ func (c *Client) addOperationGetObjectLegalHoldMiddlewares(stack *middleware.Sta if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = addOpGetObjectLegalHoldValidationMiddleware(stack); err != nil { return err } diff --git a/service/s3/api_op_GetObjectLockConfiguration.go b/service/s3/api_op_GetObjectLockConfiguration.go index c76888814c2..1ba436d53ce 100644 --- a/service/s3/api_op_GetObjectLockConfiguration.go +++ b/service/s3/api_op_GetObjectLockConfiguration.go @@ -13,9 +13,10 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Gets the Object Lock configuration for a bucket. The rule specified in the -// Object Lock configuration will be applied by default to every new object placed -// in the specified bucket. For more information, see Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) +// This operation is not supported by directory buckets. Gets the Object Lock +// configuration for a bucket. The rule specified in the Object Lock configuration +// will be applied by default to every new object placed in the specified bucket. +// For more information, see Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) // . The following action is related to GetObjectLockConfiguration : // - GetObjectAttributes (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) func (c *Client) GetObjectLockConfiguration(ctx context.Context, params *GetObjectLockConfigurationInput, optFns ...func(*Options)) (*GetObjectLockConfigurationOutput, error) { @@ -35,8 +36,10 @@ func (c *Client) GetObjectLockConfiguration(ctx context.Context, params *GetObje type GetObjectLockConfigurationInput struct { - // The bucket whose Object Lock configuration you want to retrieve. When using - // this action with an access point, you must direct requests to the access point + // The bucket whose Object Lock configuration you want to retrieve. Access points + // - When you use this action with an access point, you must provide the alias of + // the access point in place of the bucket name or specify the access point ARN. + // When using the access point ARN, you must direct requests to the access point // hostname. 
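As a usage sketch for the Object Lock read operations in these hunks (GetObjectLegalHold, GetObjectLockConfiguration, GetObjectRetention), the illustrative helper below queries a bucket's lock configuration and one object's retention settings. The names and client are placeholders, and it assumes Object Lock is enabled on the bucket; objects without retention settings will simply return an error from GetObjectRetention.

```go
package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// inspectObjectLock reads the bucket-level lock configuration and an object's retention settings.
func inspectObjectLock(ctx context.Context, client *s3.Client) error {
	cfg, err := client.GetObjectLockConfiguration(ctx, &s3.GetObjectLockConfigurationInput{
		Bucket: aws.String("DOC-EXAMPLE-BUCKET"),
	})
	if err != nil {
		return err
	}
	if cfg.ObjectLockConfiguration != nil {
		fmt.Println("bucket lock status:", cfg.ObjectLockConfiguration.ObjectLockEnabled)
	}

	ret, err := client.GetObjectRetention(ctx, &s3.GetObjectRetentionInput{
		Bucket: aws.String("DOC-EXAMPLE-BUCKET"),
		Key:    aws.String("example-object"),
	})
	if err != nil {
		return err
	}
	if ret.Retention != nil {
		fmt.Println("retention mode:", ret.Retention.Mode)
		fmt.Println("retain until:  ", aws.ToTime(ret.Retention.RetainUntilDate))
	}
	return nil
}
```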
The access point hostname takes the form // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this // action with an access point through the Amazon Web Services SDKs, you provide @@ -47,9 +50,9 @@ type GetObjectLockConfigurationInput struct { // This member is required. Bucket *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string noSmithyDocumentSerde @@ -126,6 +129,9 @@ func (c *Client) addOperationGetObjectLockConfigurationMiddlewares(stack *middle if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = addOpGetObjectLockConfigurationValidationMiddleware(stack); err != nil { return err } diff --git a/service/s3/api_op_GetObjectRetention.go b/service/s3/api_op_GetObjectRetention.go index d992fccbd57..1dea7d8abd1 100644 --- a/service/s3/api_op_GetObjectRetention.go +++ b/service/s3/api_op_GetObjectRetention.go @@ -13,10 +13,10 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Retrieves an object's retention settings. For more information, see Locking -// Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) . -// This action is not supported by Amazon S3 on Outposts. The following action is -// related to GetObjectRetention : +// This operation is not supported by directory buckets. Retrieves an object's +// retention settings. For more information, see Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) +// . This functionality is not supported for Amazon S3 on Outposts. The following +// action is related to GetObjectRetention : // - GetObjectAttributes (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) func (c *Client) GetObjectRetention(ctx context.Context, params *GetObjectRetentionInput, optFns ...func(*Options)) (*GetObjectRetentionOutput, error) { if params == nil { @@ -36,8 +36,10 @@ func (c *Client) GetObjectRetention(ctx context.Context, params *GetObjectRetent type GetObjectRetentionInput struct { // The bucket name containing the object whose retention settings you want to - // retrieve. When using this action with an access point, you must direct requests - // to the access point hostname. The access point hostname takes the form + // retrieve. Access points - When you use this action with an access point, you + // must provide the alias of the access point in place of the bucket name or + // specify the access point ARN. When using the access point ARN, you must direct + // requests to the access point hostname. The access point hostname takes the form // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this // action with an access point through the Amazon Web Services SDKs, you provide // the access point ARN in place of the bucket name. For more information about @@ -52,18 +54,19 @@ type GetObjectRetentionInput struct { // This member is required. Key *string - // The account ID of the expected bucket owner. 
If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string // Confirms that the requester knows that they will be charged for the request. // Bucket owners need not specify this parameter in their requests. If either the - // source or destination Amazon S3 bucket has Requester Pays enabled, the requester - // will pay for corresponding charges to copy the object. For information about + // source or destination S3 bucket has Requester Pays enabled, the requester will + // pay for corresponding charges to copy the object. For information about // downloading objects from Requester Pays buckets, see Downloading Objects in // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. RequestPayer types.RequestPayer // The version ID for the object whose retention settings you want to retrieve. @@ -143,6 +146,9 @@ func (c *Client) addOperationGetObjectRetentionMiddlewares(stack *middleware.Sta if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = addOpGetObjectRetentionValidationMiddleware(stack); err != nil { return err } diff --git a/service/s3/api_op_GetObjectTagging.go b/service/s3/api_op_GetObjectTagging.go index 218e7d0218c..c020e9bd751 100644 --- a/service/s3/api_op_GetObjectTagging.go +++ b/service/s3/api_op_GetObjectTagging.go @@ -13,15 +13,16 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Returns the tag-set of an object. You send the GET request against the tagging -// subresource associated with the object. To use this operation, you must have -// permission to perform the s3:GetObjectTagging action. By default, the GET -// action returns information about current version of an object. For a versioned -// bucket, you can have multiple versions of an object in your bucket. To retrieve -// tags of any other version, use the versionId query parameter. You also need -// permission for the s3:GetObjectVersionTagging action. By default, the bucket -// owner has this permission and can grant this permission to others. For -// information about the Amazon S3 object tagging feature, see Object Tagging (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html) +// This operation is not supported by directory buckets. Returns the tag-set of an +// object. You send the GET request against the tagging subresource associated with +// the object. To use this operation, you must have permission to perform the +// s3:GetObjectTagging action. By default, the GET action returns information about +// current version of an object. For a versioned bucket, you can have multiple +// versions of an object in your bucket. To retrieve tags of any other version, use +// the versionId query parameter. You also need permission for the +// s3:GetObjectVersionTagging action. By default, the bucket owner has this +// permission and can grant this permission to others. 
For information about the +// Amazon S3 object tagging feature, see Object Tagging (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html) // . The following actions are related to GetObjectTagging : // - DeleteObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjectTagging.html) // - GetObjectAttributes (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) @@ -44,15 +45,17 @@ func (c *Client) GetObjectTagging(ctx context.Context, params *GetObjectTaggingI type GetObjectTaggingInput struct { // The bucket name containing the object for which to get the tagging information. - // When using this action with an access point, you must direct requests to the + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the access + // point ARN. When using the access point ARN, you must direct requests to the // access point hostname. The access point hostname takes the form // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this // action with an access point through the Amazon Web Services SDKs, you provide // the access point ARN in place of the bucket name. For more information about // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. When you use this action with Amazon S3 on - // Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on - // Outposts hostname takes the form + // in the Amazon S3 User Guide. S3 on Outposts - When you use this action with + // Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. + // The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you // use this action with S3 on Outposts through the Amazon Web Services SDKs, you // provide the Outposts access point ARN in place of the bucket name. For more @@ -67,18 +70,19 @@ type GetObjectTaggingInput struct { // This member is required. Key *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string // Confirms that the requester knows that they will be charged for the request. // Bucket owners need not specify this parameter in their requests. If either the - // source or destination Amazon S3 bucket has Requester Pays enabled, the requester - // will pay for corresponding charges to copy the object. For information about + // source or destination S3 bucket has Requester Pays enabled, the requester will + // pay for corresponding charges to copy the object. For information about // downloading objects from Requester Pays buckets, see Downloading Objects in // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. RequestPayer types.RequestPayer // The versionId of the object for which to get the tagging information. 
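
For readers skimming this hunk, here is a minimal usage sketch of the GetObjectTagging call documented above. The sketch is not part of the generated patch; the bucket name, object key, and account ID are placeholders, and ExpectedBucketOwner is optional.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Retrieve the tag-set of the current object version. Supplying
	// ExpectedBucketOwner makes the request fail with 403 Forbidden if the
	// bucket is owned by a different account, as documented above.
	out, err := client.GetObjectTagging(context.TODO(), &s3.GetObjectTaggingInput{
		Bucket:              aws.String("DOC-EXAMPLE-BUCKET"), // placeholder bucket name
		Key:                 aws.String("example-object"),     // placeholder object key
		ExpectedBucketOwner: aws.String("111122223333"),       // placeholder account ID
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, tag := range out.TagSet {
		fmt.Printf("%s=%s\n", aws.ToString(tag.Key), aws.ToString(tag.Value))
	}
}
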
@@ -163,6 +167,9 @@ func (c *Client) addOperationGetObjectTaggingMiddlewares(stack *middleware.Stack if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = addOpGetObjectTaggingValidationMiddleware(stack); err != nil { return err } diff --git a/service/s3/api_op_GetObjectTorrent.go b/service/s3/api_op_GetObjectTorrent.go index f5048ce7e63..6689ef97665 100644 --- a/service/s3/api_op_GetObjectTorrent.go +++ b/service/s3/api_op_GetObjectTorrent.go @@ -14,12 +14,13 @@ import ( "io" ) -// Returns torrent files from a bucket. BitTorrent can save you bandwidth when -// you're distributing large files. You can get torrent only for objects that are -// less than 5 GB in size, and that are not encrypted using server-side encryption -// with a customer-provided encryption key. To use GET, you must have READ access -// to the object. This action is not supported by Amazon S3 on Outposts. The -// following action is related to GetObjectTorrent : +// This operation is not supported by directory buckets. Returns torrent files +// from a bucket. BitTorrent can save you bandwidth when you're distributing large +// files. You can get torrent only for objects that are less than 5 GB in size, and +// that are not encrypted using server-side encryption with a customer-provided +// encryption key. To use GET, you must have READ access to the object. This +// functionality is not supported for Amazon S3 on Outposts. The following action +// is related to GetObjectTorrent : // - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) func (c *Client) GetObjectTorrent(ctx context.Context, params *GetObjectTorrentInput, optFns ...func(*Options)) (*GetObjectTorrentOutput, error) { if params == nil { @@ -48,18 +49,19 @@ type GetObjectTorrentInput struct { // This member is required. Key *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string // Confirms that the requester knows that they will be charged for the request. // Bucket owners need not specify this parameter in their requests. If either the - // source or destination Amazon S3 bucket has Requester Pays enabled, the requester - // will pay for corresponding charges to copy the object. For information about + // source or destination S3 bucket has Requester Pays enabled, the requester will + // pay for corresponding charges to copy the object. For information about // downloading objects from Requester Pays buckets, see Downloading Objects in // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. RequestPayer types.RequestPayer noSmithyDocumentSerde @@ -76,7 +78,7 @@ type GetObjectTorrentOutput struct { Body io.ReadCloser // If present, indicates that the requester was successfully charged for the - // request. + // request. This functionality is not supported for directory buckets. 
RequestCharged types.RequestCharged // Metadata pertaining to the operation's result. @@ -137,6 +139,9 @@ func (c *Client) addOperationGetObjectTorrentMiddlewares(stack *middleware.Stack if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = addOpGetObjectTorrentValidationMiddleware(stack); err != nil { return err } diff --git a/service/s3/api_op_GetPublicAccessBlock.go b/service/s3/api_op_GetPublicAccessBlock.go index 6a2cd83a9d7..0ae12e397cd 100644 --- a/service/s3/api_op_GetPublicAccessBlock.go +++ b/service/s3/api_op_GetPublicAccessBlock.go @@ -10,13 +10,14 @@ import ( s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Retrieves the PublicAccessBlock configuration for an Amazon S3 bucket. To use -// this operation, you must have the s3:GetBucketPublicAccessBlock permission. For -// more information about Amazon S3 permissions, see Specifying Permissions in a -// Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) +// This operation is not supported by directory buckets. Retrieves the +// PublicAccessBlock configuration for an Amazon S3 bucket. To use this operation, +// you must have the s3:GetBucketPublicAccessBlock permission. For more +// information about Amazon S3 permissions, see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) // . When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket or // an object, it checks the PublicAccessBlock configuration for both the bucket // (or the bucket that contains the object) and the bucket owner's account. If the @@ -52,9 +53,9 @@ type GetPublicAccessBlockInput struct { // This member is required. Bucket *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string noSmithyDocumentSerde @@ -62,7 +63,7 @@ type GetPublicAccessBlockInput struct { func (in *GetPublicAccessBlockInput) bindEndpointParams(p *EndpointParameters) { p.Bucket = in.Bucket - + p.UseS3ExpressControlEndpoint = ptr.Bool(true) } type GetPublicAccessBlockOutput struct { @@ -132,6 +133,9 @@ func (c *Client) addOperationGetPublicAccessBlockMiddlewares(stack *middleware.S if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = addOpGetPublicAccessBlockValidationMiddleware(stack); err != nil { return err } diff --git a/service/s3/api_op_HeadBucket.go b/service/s3/api_op_HeadBucket.go index 297376bc8e4..8a18ff851e7 100644 --- a/service/s3/api_op_HeadBucket.go +++ b/service/s3/api_op_HeadBucket.go @@ -17,30 +17,44 @@ import ( "time" ) -// This action is useful to determine if a bucket exists and you have permission -// to access it. The action returns a 200 OK if the bucket exists and you have -// permission to access it. 
If the bucket does not exist or you do not have -// permission to access it, the HEAD request returns a generic 400 Bad Request , -// 403 Forbidden or 404 Not Found code. A message body is not included, so you -// cannot determine the exception beyond these error codes. To use this operation, -// you must have permissions to perform the s3:ListBucket action. The bucket owner -// has this permission by default and can grant this permission to others. For more -// information about permissions, see Permissions Related to Bucket Subresource -// Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// . To use this API operation against an access point, you must provide the alias -// of the access point in place of the bucket name or specify the access point ARN. -// When using the access point ARN, you must direct requests to the access point -// hostname. The access point hostname takes the form -// AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using the -// Amazon Web Services SDKs, you provide the ARN in place of the bucket name. For -// more information, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) -// . To use this API operation against an Object Lambda access point, provide the -// alias of the Object Lambda access point in place of the bucket name. If the -// Object Lambda access point alias in a request is not valid, the error code -// InvalidAccessPointAliasError is returned. For more information about -// InvalidAccessPointAliasError , see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList) -// . +// You can use this operation to determine if a bucket exists and if you have +// permission to access it. The action returns a 200 OK if the bucket exists and +// you have permission to access it. If the bucket does not exist or you do not +// have permission to access it, the HEAD request returns a generic 400 Bad Request +// , 403 Forbidden or 404 Not Found code. A message body is not included, so you +// cannot determine the exception beyond these error codes. Directory buckets - You +// must make requests for this API operation to the Zonal endpoint. These endpoints +// support virtual-hosted-style requests in the format +// https://bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests +// are not supported. For more information, see Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. Authentication and authorization All HeadBucket +// requests must be authenticated and signed by using IAM credentials (access key +// ID and secret access key for the IAM identities). All headers with the x-amz- +// prefix, including x-amz-copy-source , must be signed. For more information, see +// REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html) +// . Directory bucket - You must use IAM credentials to authenticate and authorize +// your access to the HeadBucket API operation, instead of using the temporary +// security credentials through the CreateSession API operation. Amazon Web +// Services CLI or SDKs handles authentication and authorization on your behalf. 
+// Permissions +// - General purpose bucket permissions - To use this operation, you must have +// permissions to perform the s3:ListBucket action. The bucket owner has this +// permission by default and can grant this permission to others. For more +// information about permissions, see Managing access permissions to your Amazon +// S3 resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// in the Amazon S3 User Guide. +// - Directory bucket permissions - You must have the s3express:CreateSession +// permission in the Action element of a policy. By default, the session is in +// the ReadWrite mode. If you want to restrict the access, you can explicitly set +// the s3express:SessionMode condition key to ReadOnly on the bucket. For more +// information about example bucket policies, see Example bucket policies for S3 +// Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html) +// and Amazon Web Services Identity and Access Management (IAM) identity-based +// policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html) +// in the Amazon S3 User Guide. +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket_name.s3express-az_id.region.amazonaws.com . func (c *Client) HeadBucket(ctx context.Context, params *HeadBucketInput, optFns ...func(*Options)) (*HeadBucketOutput, error) { if params == nil { params = &HeadBucketInput{} @@ -58,19 +72,31 @@ func (c *Client) HeadBucket(ctx context.Context, params *HeadBucketInput, optFns type HeadBucketInput struct { - // The bucket name. When using this action with an access point, you must direct - // requests to the access point hostname. The access point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. When you use this action with an Object Lambda - // access point, provide the alias of the Object Lambda access point in place of - // the bucket name. If the Object Lambda access point alias in a request is not - // valid, the error code InvalidAccessPointAliasError is returned. For more - // information about InvalidAccessPointAliasError , see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList) - // . When you use this action with Amazon S3 on Outposts, you must direct requests - // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form + // The bucket name. Directory buckets - When you use this operation with a + // directory bucket, you must use virtual-hosted-style requests in the format + // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not + // supported. Directory bucket names must be unique in the chosen Availability + // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for + // example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 ). 
For information about bucket + // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. Access points - When you use this action with an + // access point, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When using the access point ARN, + // you must direct requests to the access point hostname. The access point hostname + // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. Object Lambda access points - When you use this API + // operation with an Object Lambda access point, provide the alias of the Object + // Lambda access point in place of the bucket name. If the Object Lambda access + // point alias in a request is not valid, the error code + // InvalidAccessPointAliasError is returned. For more information about + // InvalidAccessPointAliasError , see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList) + // . Access points and Object Lambda access points are not supported by directory + // buckets. S3 on Outposts - When you use this action with Amazon S3 on Outposts, + // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts + // hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you // use this action with S3 on Outposts through the Amazon Web Services SDKs, you // provide the Outposts access point ARN in place of the bucket name. For more @@ -80,9 +106,9 @@ type HeadBucketInput struct { // This member is required. Bucket *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string noSmithyDocumentSerde @@ -94,6 +120,25 @@ func (in *HeadBucketInput) bindEndpointParams(p *EndpointParameters) { } type HeadBucketOutput struct { + + // Indicates whether the bucket name used in the request is an access point alias. + // This functionality is not supported for directory buckets. + AccessPointAlias *bool + + // The name of the location where the bucket will be created. For directory + // buckets, the AZ ID of the Availability Zone where the bucket is created. An + // example AZ ID value is usw2-az2 . This functionality is only supported by + // directory buckets. + BucketLocationName *string + + // The type of location where the bucket is created. This functionality is only + // supported by directory buckets. + BucketLocationType types.LocationType + + // The Region that the bucket is located. This functionality is not supported for + // directory buckets. + BucketRegion *string + // Metadata pertaining to the operation's result. 
ResultMetadata middleware.Metadata @@ -155,6 +200,9 @@ func (c *Client) addOperationHeadBucketMiddlewares(stack *middleware.Stack, opti if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = addOpHeadBucketValidationMiddleware(stack); err != nil { return err } diff --git a/service/s3/api_op_HeadObject.go b/service/s3/api_op_HeadObject.go index a006808d8e2..5f7b55e56ad 100644 --- a/service/s3/api_op_HeadObject.go +++ b/service/s3/api_op_HeadObject.go @@ -17,66 +17,87 @@ import ( "time" ) -// The HEAD action retrieves metadata from an object without returning the object -// itself. This action is useful if you're only interested in an object's metadata. -// To use HEAD , you must have READ access to the object. A HEAD request has the -// same options as a GET action on an object. The response is identical to the GET -// response except that there is no response body. Because of this, if the HEAD -// request generates an error, it returns a generic code, such as 400 Bad Request , -// 403 Forbidden , 404 Not Found , 405 Method Not Allowed , 412 Precondition Failed -// , or 304 Not Modified . It's not possible to retrieve the exact exception of -// these error codes. If you encrypt an object by using server-side encryption with -// customer-provided encryption keys (SSE-C) when you store the object in Amazon -// S3, then when you retrieve the metadata from the object, you must use the -// following headers: +// The HEAD operation retrieves metadata from an object without returning the +// object itself. This operation is useful if you're interested only in an object's +// metadata. A HEAD request has the same options as a GET operation on an object. +// The response is identical to the GET response except that there is no response +// body. Because of this, if the HEAD request generates an error, it returns a +// generic code, such as 400 Bad Request , 403 Forbidden , 404 Not Found , 405 +// Method Not Allowed , 412 Precondition Failed , or 304 Not Modified . It's not +// possible to retrieve the exact exception of these error codes. Request headers +// are limited to 8 KB in size. For more information, see Common Request Headers (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonRequestHeaders.html) +// . Directory buckets - For directory buckets, you must make requests for this API +// operation to the Zonal endpoint. These endpoints support virtual-hosted-style +// requests in the format +// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style +// requests are not supported. For more information, see Regional and Zonal +// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. Permissions +// - General purpose bucket permissions - To use HEAD , you must have the +// s3:GetObject permission. You need the relevant read object (or version) +// permission for this operation. For more information, see Actions, resources, +// and condition keys for Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/list_amazons3.html) +// in the Amazon S3 User Guide. If the object you request doesn't exist, the error +// that Amazon S3 returns depends on whether you also have the s3:ListBucket +// permission. +// - If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an +// HTTP status code 404 Not Found error. 
+// - If you don’t have the s3:ListBucket permission, Amazon S3 returns an HTTP +// status code 403 Forbidden error. +// - Directory bucket permissions - To grant access to this API operation on a +// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// API operation for session-based authorization. Specifically, you grant the +// s3express:CreateSession permission to the directory bucket in a bucket policy +// or an IAM identity-based policy. Then, you make the CreateSession API call on +// the bucket to obtain a session token. With the session token in your request +// header, you can make API requests to this operation. After the session token +// expires, you make another CreateSession API call to generate a new session +// token for use. Amazon Web Services CLI or SDKs create session and refresh the +// session token automatically to avoid service interruptions when a session +// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// . +// +// Encryption Encryption request headers, like x-amz-server-side-encryption , +// should not be sent for HEAD requests if your object uses server-side encryption +// with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side +// encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side +// encryption with Amazon S3 managed encryption keys (SSE-S3). The +// x-amz-server-side-encryption header is used when you PUT an object to S3 and +// want to specify the encryption method. If you include this header in a HEAD +// request for an object that uses these types of keys, you’ll get an HTTP 400 Bad +// Request error. It's because the encryption method can't be changed when you +// retrieve the object. If you encrypt an object by using server-side encryption +// with customer-provided encryption keys (SSE-C) when you store the object in +// Amazon S3, then when you retrieve the metadata from the object, you must use the +// following headers to provide the encryption key for the server to be able to +// retrieve the object's metadata. The headers are: // - x-amz-server-side-encryption-customer-algorithm // - x-amz-server-side-encryption-customer-key // - x-amz-server-side-encryption-customer-key-MD5 // // For more information about SSE-C, see Server-Side Encryption (Using // Customer-Provided Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) -// . -// - Encryption request headers, like x-amz-server-side-encryption , should not -// be sent for GET requests if your object uses server-side encryption with Key -// Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption with -// Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with Amazon -// S3 managed encryption keys (SSE-S3). If your object does use these types of -// keys, you’ll get an HTTP 400 Bad Request error. -// - The last modified property in this case is the creation date of the object. -// -// Request headers are limited to 8 KB in size. For more information, see Common -// Request Headers (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonRequestHeaders.html) -// . 
Consider the following when using request headers: -// - Consideration 1 – If both of the If-Match and If-Unmodified-Since headers -// are present in the request as follows: -// - If-Match condition evaluates to true , and; -// - If-Unmodified-Since condition evaluates to false ; Then Amazon S3 returns -// 200 OK and the data requested. -// - Consideration 2 – If both of the If-None-Match and If-Modified-Since headers -// are present in the request as follows: -// - If-None-Match condition evaluates to false , and; -// - If-Modified-Since condition evaluates to true ; Then Amazon S3 returns the -// 304 Not Modified response code. -// -// For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232) -// . Permissions You need the relevant read object (or version) permission for this -// operation. For more information, see Actions, resources, and condition keys for -// Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/list_amazons3.html) . -// If the object you request doesn't exist, the error that Amazon S3 returns -// depends on whether you also have the s3:ListBucket permission. -// - If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an -// HTTP status code 404 error. -// - If you don’t have the s3:ListBucket permission, Amazon S3 returns an HTTP -// status code 403 error. +// in the Amazon S3 User Guide. Directory bucket permissions - For directory +// buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) ( +// AES256 ) is supported. Versioning // -// Versioning // - If the current version of the object is a delete marker, Amazon S3 behaves // as if the object was deleted and includes x-amz-delete-marker: true in the // response. +// // - If the specified version is a delete marker, the response returns a 405 -// (Method Not Allowed) error and the Last-Modified: timestamp response header. +// Method Not Allowed error and the Last-Modified: timestamp response header. +// +// - Directory buckets - Delete marker is not supported by directory buckets. +// +// - Directory buckets - S3 Versioning isn't enabled and supported for directory +// buckets. For this API operation, only the null value of the version ID is +// supported by directory buckets. You can only specify null to the versionId +// query parameter in the request. // -// The following actions are related to HeadObject : +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket_name.s3express-az_id.region.amazonaws.com . The following actions are +// related to HeadObject : // - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) // - GetObjectAttributes (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) func (c *Client) HeadObject(ctx context.Context, params *HeadObjectInput, optFns ...func(*Options)) (*HeadObjectOutput, error) { @@ -96,16 +117,26 @@ func (c *Client) HeadObject(ctx context.Context, params *HeadObjectInput, optFns type HeadObjectInput struct { - // The name of the bucket containing the object. When using this action with an - // access point, you must direct requests to the access point hostname. The access - // point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. 
For more information about - // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. When you use this action with Amazon S3 on - // Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on - // Outposts hostname takes the form + // The name of the bucket that contains the object. Directory buckets - When you + // use this operation with a directory bucket, you must use virtual-hosted-style + // requests in the format Bucket_name.s3express-az_id.region.amazonaws.com . + // Path-style requests are not supported. Directory bucket names must be unique in + // the chosen Availability Zone. Bucket names must follow the format + // bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 + // ). For information about bucket naming restrictions, see Directory bucket + // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. Access points - When you use this action with an + // access point, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When using the access point ARN, + // you must direct requests to the access point hostname. The access point hostname + // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. Access points and Object Lambda access points are + // not supported by directory buckets. S3 on Outposts - When you use this action + // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts + // hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you // use this action with S3 on Outposts through the Amazon Web Services SDKs, you // provide the Outposts access point ARN in place of the bucket name. For more @@ -126,25 +157,46 @@ type HeadObjectInput struct { // the kms:Decrypt action for the request to succeed. ChecksumMode types.ChecksumMode - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string // Return the object only if its entity tag (ETag) is the same as the one - // specified; otherwise, return a 412 (precondition failed) error. + // specified; otherwise, return a 412 (precondition failed) error. If both of the + // If-Match and If-Unmodified-Since headers are present in the request as follows: + // - If-Match condition evaluates to true , and; + // - If-Unmodified-Since condition evaluates to false ; + // Then Amazon S3 returns 200 OK and the data requested. For more information + // about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232) . 
IfMatch *string // Return the object only if it has been modified since the specified time; - // otherwise, return a 304 (not modified) error. + // otherwise, return a 304 (not modified) error. If both of the If-None-Match and + // If-Modified-Since headers are present in the request as follows: + // - If-None-Match condition evaluates to false , and; + // - If-Modified-Since condition evaluates to true ; + // Then Amazon S3 returns the 304 Not Modified response code. For more information + // about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232) . IfModifiedSince *time.Time // Return the object only if its entity tag (ETag) is different from the one - // specified; otherwise, return a 304 (not modified) error. + // specified; otherwise, return a 304 (not modified) error. If both of the + // If-None-Match and If-Modified-Since headers are present in the request as + // follows: + // - If-None-Match condition evaluates to false , and; + // - If-Modified-Since condition evaluates to true ; + // Then Amazon S3 returns the 304 Not Modified response code. For more information + // about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232) . IfNoneMatch *string // Return the object only if it has not been modified since the specified time; - // otherwise, return a 412 (precondition failed) error. + // otherwise, return a 412 (precondition failed) error. If both of the If-Match + // and If-Unmodified-Since headers are present in the request as follows: + // - If-Match condition evaluates to true , and; + // - If-Unmodified-Since condition evaluates to false ; + // Then Amazon S3 returns 200 OK and the data requested. For more information + // about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232) . IfUnmodifiedSince *time.Time // Part number of the object being read. This is a positive integer between 1 and @@ -160,30 +212,35 @@ type HeadObjectInput struct { // Confirms that the requester knows that they will be charged for the request. // Bucket owners need not specify this parameter in their requests. If either the - // source or destination Amazon S3 bucket has Requester Pays enabled, the requester - // will pay for corresponding charges to copy the object. For information about + // source or destination S3 bucket has Requester Pays enabled, the requester will + // pay for corresponding charges to copy the object. For information about // downloading objects from Requester Pays buckets, see Downloading Objects in // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. RequestPayer types.RequestPayer - // Specifies the algorithm to use to when encrypting the object (for example, - // AES256). + // Specifies the algorithm to use when encrypting the object (for example, + // AES256). This functionality is not supported for directory buckets. SSECustomerAlgorithm *string // Specifies the customer-provided encryption key for Amazon S3 to use in // encrypting data. This value is used to store the object and then it is // discarded; Amazon S3 does not store the encryption key. The key must be // appropriate for use with the algorithm specified in the - // x-amz-server-side-encryption-customer-algorithm header. + // x-amz-server-side-encryption-customer-algorithm header. This functionality is + // not supported for directory buckets. 
SSECustomerKey *string // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure that the - // encryption key was transmitted without error. + // encryption key was transmitted without error. This functionality is not + // supported for directory buckets. SSECustomerKeyMD5 *string - // VersionId used to reference a specific version of the object. + // Version ID used to reference a specific version of the object. For directory + // buckets in this API operation, only the null value of the version ID is + // supported. VersionId *string noSmithyDocumentSerde @@ -200,48 +257,58 @@ type HeadObjectOutput struct { // Indicates that a range of bytes was specified. AcceptRanges *string - // The archive state of the head object. + // The archive state of the head object. This functionality is not supported for + // directory buckets. ArchiveStatus types.ArchiveStatus // Indicates whether the object uses an S3 Bucket Key for server-side encryption - // with Key Management Service (KMS) keys (SSE-KMS). + // with Key Management Service (KMS) keys (SSE-KMS). This functionality is not + // supported for directory buckets. BucketKeyEnabled *bool // Specifies caching behavior along the request/reply chain. CacheControl *string // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this may not - // be a checksum value of the object. For more information about how checksums are - // calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use an API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. Instead, it's a calculation based on the + // checksum values of each individual part. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumCRC32 *string // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this may not - // be a checksum value of the object. For more information about how checksums are - // calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use an API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. Instead, it's a calculation based on the + // checksum values of each individual part. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumCRC32C *string // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this may not - // be a checksum value of the object. 
For more information about how checksums are - // calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use the API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. Instead, it's a calculation based on the + // checksum values of each individual part. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumSHA1 *string // The base64-encoded, 256-bit SHA-256 digest of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this may not - // be a checksum value of the object. For more information about how checksums are - // calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use an API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. Instead, it's a calculation based on the + // checksum values of each individual part. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumSHA256 *string // Specifies presentational information for the object. ContentDisposition *string - // Specifies what content encodings have been applied to the object and thus what + // Indicates what content encodings have been applied to the object and thus what // decoding mechanisms must be applied to obtain the media-type referenced by the // Content-Type header field. ContentEncoding *string @@ -256,17 +323,19 @@ type HeadObjectOutput struct { ContentType *string // Specifies whether the object retrieved was (true) or was not (false) a Delete - // Marker. If false, this response header does not appear in the response. + // Marker. If false, this response header does not appear in the response. This + // functionality is not supported for directory buckets. DeleteMarker *bool // An entity tag (ETag) is an opaque identifier assigned by a web server to a // specific version of a resource found at a URL. ETag *string - // If the object expiration is configured (see PUT Bucket lifecycle), the response - // includes this header. It includes the expiry-date and rule-id key-value pairs - // providing object expiration information. The value of the rule-id is - // URL-encoded. + // If the object expiration is configured (see PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html) + // ), the response includes this header. It includes the expiry-date and rule-id + // key-value pairs providing object expiration information. The value of the + // rule-id is URL-encoded. This functionality is not supported for directory + // buckets. Expiration *string // The date and time at which the object is no longer cacheable. 
@@ -283,24 +352,26 @@ type HeadObjectOutput struct { // This is set to the number of metadata entries not returned in x-amz-meta // headers. This can happen if you create metadata using an API like SOAP that // supports more flexible metadata than the REST API. For example, using SOAP, you - // can create metadata whose values are not legal HTTP headers. + // can create metadata whose values are not legal HTTP headers. This functionality + // is not supported for directory buckets. MissingMeta *int32 // Specifies whether a legal hold is in effect for this object. This header is // only returned if the requester has the s3:GetObjectLegalHold permission. This // header is not returned if the specified version of this object has never had a // legal hold applied. For more information about S3 Object Lock, see Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) - // . + // . This functionality is not supported for directory buckets. ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus // The Object Lock mode, if any, that's in effect for this object. This header is // only returned if the requester has the s3:GetObjectRetention permission. For // more information about S3 Object Lock, see Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) - // . + // . This functionality is not supported for directory buckets. ObjectLockMode types.ObjectLockMode // The date and time when the Object Lock retention period expires. This header is - // only returned if the requester has the s3:GetObjectRetention permission. + // only returned if the requester has the s3:GetObjectRetention permission. This + // functionality is not supported for directory buckets. ObjectLockRetainUntilDate *time.Time // The count of parts this object has. This value is only returned if you specify @@ -333,11 +404,11 @@ type HeadObjectOutput struct { // completed for all destinations. If one or more destinations fails replication // the header will return FAILED. // For more information, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) - // . + // . This functionality is not supported for directory buckets. ReplicationStatus types.ReplicationStatus // If present, indicates that the requester was successfully charged for the - // request. + // request. This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // If the object is an archived object (an object whose storage class is GLACIER), @@ -349,39 +420,48 @@ type HeadObjectOutput struct { // Dec 2012 00:00:00 GMT" If the object restoration is in progress, the header // returns the value ongoing-request="true" . For more information about archiving // objects, see Transitioning Objects: General Considerations (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html#lifecycle-transition-general-considerations) - // . + // . This functionality is not supported for directory buckets. Only the S3 Express + // One Zone storage class is supported by directory buckets to store objects. Restore *string // If server-side encryption with a customer-provided encryption key was - // requested, the response will include this header confirming the encryption - // algorithm used. + // requested, the response will include this header to confirm the encryption + // algorithm that's used. This functionality is not supported for directory + // buckets. 
SSECustomerAlgorithm *string // If server-side encryption with a customer-provided encryption key was - // requested, the response will include this header to provide round-trip message - // integrity verification of the customer-provided encryption key. + // requested, the response will include this header to provide the round-trip + // message integrity verification of the customer-provided encryption key. This + // functionality is not supported for directory buckets. SSECustomerKeyMD5 *string - // If present, specifies the ID of the Key Management Service (KMS) symmetric - // encryption customer managed key that was used for the object. + // If present, indicates the ID of the Key Management Service (KMS) symmetric + // encryption customer managed key that was used for the object. This functionality + // is not supported for directory buckets. SSEKMSKeyId *string - // The server-side encryption algorithm used when storing this object in Amazon S3 - // (for example, AES256 , aws:kms , aws:kms:dsse ). + // The server-side encryption algorithm used when you store this object in Amazon + // S3 (for example, AES256 , aws:kms , aws:kms:dsse ). For directory buckets, only + // server-side encryption with Amazon S3 managed keys (SSE-S3) ( AES256 ) is + // supported. ServerSideEncryption types.ServerSideEncryption // Provides storage class information of the object. Amazon S3 returns this header // for all objects except for S3 Standard storage class objects. For more // information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) - // . + // . Directory buckets - Only the S3 Express One Zone storage class is supported by + // directory buckets to store objects. StorageClass types.StorageClass - // Version of the object. + // Version ID of the object. This functionality is not supported for directory + // buckets. VersionId *string // If the bucket is configured as a website, redirects requests for this object to // another object in the same bucket or to an external URL. Amazon S3 stores the - // value of this header in the object metadata. + // value of this header in the object metadata. This functionality is not supported + // for directory buckets. WebsiteRedirectLocation *string // Metadata pertaining to the operation's result. @@ -445,6 +525,9 @@ func (c *Client) addOperationHeadObjectMiddlewares(stack *middleware.Stack, opti if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = addOpHeadObjectValidationMiddleware(stack); err != nil { return err } diff --git a/service/s3/api_op_ListBucketAnalyticsConfigurations.go b/service/s3/api_op_ListBucketAnalyticsConfigurations.go index af62aa9bab8..6ecf8458b85 100644 --- a/service/s3/api_op_ListBucketAnalyticsConfigurations.go +++ b/service/s3/api_op_ListBucketAnalyticsConfigurations.go @@ -10,18 +10,20 @@ import ( s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Lists the analytics configurations for the bucket. You can have up to 1,000 -// analytics configurations per bucket. This action supports list pagination and -// does not return more than 100 configurations at a time. You should always check -// the IsTruncated element in the response. 
If there are no more configurations to -// list, IsTruncated is set to false. If there are more configurations to list, -// IsTruncated is set to true, and there will be a value in NextContinuationToken . -// You use the NextContinuationToken value to continue the pagination of the list -// by passing the value in continuation-token in the request to GET the next page. -// To use this operation, you must have permissions to perform the +// This operation is not supported by directory buckets. Lists the analytics +// configurations for the bucket. You can have up to 1,000 analytics configurations +// per bucket. This action supports list pagination and does not return more than +// 100 configurations at a time. You should always check the IsTruncated element +// in the response. If there are no more configurations to list, IsTruncated is +// set to false. If there are more configurations to list, IsTruncated is set to +// true, and there will be a value in NextContinuationToken . You use the +// NextContinuationToken value to continue the pagination of the list by passing +// the value in continuation-token in the request to GET the next page. To use +// this operation, you must have permissions to perform the // s3:GetAnalyticsConfiguration action. The bucket owner has this permission by // default. The bucket owner can grant this permission to others. For more // information about permissions, see Permissions Related to Bucket Subresource @@ -59,9 +61,9 @@ type ListBucketAnalyticsConfigurationsInput struct { // should begin. ContinuationToken *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string noSmithyDocumentSerde @@ -69,7 +71,7 @@ type ListBucketAnalyticsConfigurationsInput struct { func (in *ListBucketAnalyticsConfigurationsInput) bindEndpointParams(p *EndpointParameters) { p.Bucket = in.Bucket - + p.UseS3ExpressControlEndpoint = ptr.Bool(true) } type ListBucketAnalyticsConfigurationsOutput struct { @@ -152,6 +154,9 @@ func (c *Client) addOperationListBucketAnalyticsConfigurationsMiddlewares(stack if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = addOpListBucketAnalyticsConfigurationsValidationMiddleware(stack); err != nil { return err } diff --git a/service/s3/api_op_ListBucketIntelligentTieringConfigurations.go b/service/s3/api_op_ListBucketIntelligentTieringConfigurations.go index 1dd4ca5ac8d..de4a2079ae0 100644 --- a/service/s3/api_op_ListBucketIntelligentTieringConfigurations.go +++ b/service/s3/api_op_ListBucketIntelligentTieringConfigurations.go @@ -10,11 +10,13 @@ import ( s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Lists the S3 Intelligent-Tiering configuration from the specified bucket. The -// S3 Intelligent-Tiering storage class is designed to optimize storage costs by +// This operation is not supported by directory buckets. 
Lists the S3 +// Intelligent-Tiering configuration from the specified bucket. The S3 +// Intelligent-Tiering storage class is designed to optimize storage costs by // automatically moving data to the most cost-effective storage access tier, // without performance impact or operational overhead. S3 Intelligent-Tiering // delivers automatic cost savings in three low latency and high throughput access @@ -63,7 +65,7 @@ type ListBucketIntelligentTieringConfigurationsInput struct { func (in *ListBucketIntelligentTieringConfigurationsInput) bindEndpointParams(p *EndpointParameters) { p.Bucket = in.Bucket - + p.UseS3ExpressControlEndpoint = ptr.Bool(true) } type ListBucketIntelligentTieringConfigurationsOutput struct { @@ -146,6 +148,9 @@ func (c *Client) addOperationListBucketIntelligentTieringConfigurationsMiddlewar if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = addOpListBucketIntelligentTieringConfigurationsValidationMiddleware(stack); err != nil { return err } diff --git a/service/s3/api_op_ListBucketInventoryConfigurations.go b/service/s3/api_op_ListBucketInventoryConfigurations.go index 0000dccd225..881f7d92530 100644 --- a/service/s3/api_op_ListBucketInventoryConfigurations.go +++ b/service/s3/api_op_ListBucketInventoryConfigurations.go @@ -10,14 +10,16 @@ import ( s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Returns a list of inventory configurations for the bucket. You can have up to -// 1,000 analytics configurations per bucket. This action supports list pagination -// and does not return more than 100 configurations at a time. Always check the -// IsTruncated element in the response. If there are no more configurations to -// list, IsTruncated is set to false. If there are more configurations to list, +// This operation is not supported by directory buckets. Returns a list of +// inventory configurations for the bucket. You can have up to 1,000 analytics +// configurations per bucket. This action supports list pagination and does not +// return more than 100 configurations at a time. Always check the IsTruncated +// element in the response. If there are no more configurations to list, +// IsTruncated is set to false. If there are more configurations to list, // IsTruncated is set to true, and there is a value in NextContinuationToken . You // use the NextContinuationToken value to continue the pagination of the list by // passing the value in continuation-token in the request to GET the next page. To @@ -60,9 +62,9 @@ type ListBucketInventoryConfigurationsInput struct { // Amazon S3 understands. ContinuationToken *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). 
ExpectedBucketOwner *string noSmithyDocumentSerde @@ -70,7 +72,7 @@ type ListBucketInventoryConfigurationsInput struct { func (in *ListBucketInventoryConfigurationsInput) bindEndpointParams(p *EndpointParameters) { p.Bucket = in.Bucket - + p.UseS3ExpressControlEndpoint = ptr.Bool(true) } type ListBucketInventoryConfigurationsOutput struct { @@ -153,6 +155,9 @@ func (c *Client) addOperationListBucketInventoryConfigurationsMiddlewares(stack if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = addOpListBucketInventoryConfigurationsValidationMiddleware(stack); err != nil { return err } diff --git a/service/s3/api_op_ListBucketMetricsConfigurations.go b/service/s3/api_op_ListBucketMetricsConfigurations.go index 597e06824c6..fc2cf72877f 100644 --- a/service/s3/api_op_ListBucketMetricsConfigurations.go +++ b/service/s3/api_op_ListBucketMetricsConfigurations.go @@ -13,12 +13,13 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Lists the metrics configurations for the bucket. The metrics configurations are -// only for the request metrics of the bucket and do not provide information on -// daily storage metrics. You can have up to 1,000 configurations per bucket. This -// action supports list pagination and does not return more than 100 configurations -// at a time. Always check the IsTruncated element in the response. If there are -// no more configurations to list, IsTruncated is set to false. If there are more +// This operation is not supported by directory buckets. Lists the metrics +// configurations for the bucket. The metrics configurations are only for the +// request metrics of the bucket and do not provide information on daily storage +// metrics. You can have up to 1,000 configurations per bucket. This action +// supports list pagination and does not return more than 100 configurations at a +// time. Always check the IsTruncated element in the response. If there are no +// more configurations to list, IsTruncated is set to false. If there are more // configurations to list, IsTruncated is set to true, and there is a value in // NextContinuationToken . You use the NextContinuationToken value to continue the // pagination of the list by passing the value in continuation-token in the @@ -62,9 +63,9 @@ type ListBucketMetricsConfigurationsInput struct { // Amazon S3 understands. ContinuationToken *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). 
ExpectedBucketOwner *string noSmithyDocumentSerde @@ -156,6 +157,9 @@ func (c *Client) addOperationListBucketMetricsConfigurationsMiddlewares(stack *m if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = addOpListBucketMetricsConfigurationsValidationMiddleware(stack); err != nil { return err } diff --git a/service/s3/api_op_ListBuckets.go b/service/s3/api_op_ListBuckets.go index 7e3c35299a3..a6189288723 100644 --- a/service/s3/api_op_ListBuckets.go +++ b/service/s3/api_op_ListBuckets.go @@ -13,10 +13,10 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Returns a list of all buckets owned by the authenticated sender of the request. -// To use this operation, you must have the s3:ListAllMyBuckets permission. For -// information about Amazon S3 buckets, see Creating, configuring, and working -// with Amazon S3 buckets (https://docs.aws.amazon.com/AmazonS3/latest/userguide/creating-buckets-s3.html) +// This operation is not supported by directory buckets. Returns a list of all +// buckets owned by the authenticated sender of the request. To use this operation, +// you must have the s3:ListAllMyBuckets permission. For information about Amazon +// S3 buckets, see Creating, configuring, and working with Amazon S3 buckets (https://docs.aws.amazon.com/AmazonS3/latest/userguide/creating-buckets-s3.html) // . func (c *Client) ListBuckets(ctx context.Context, params *ListBucketsInput, optFns ...func(*Options)) (*ListBucketsOutput, error) { if params == nil { @@ -106,6 +106,9 @@ func (c *Client) addOperationListBucketsMiddlewares(stack *middleware.Stack, opt if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListBuckets(options.Region), middleware.Before); err != nil { return err } diff --git a/service/s3/api_op_ListDirectoryBuckets.go b/service/s3/api_op_ListDirectoryBuckets.go new file mode 100644 index 00000000000..373531ab273 --- /dev/null +++ b/service/s3/api_op_ListDirectoryBuckets.go @@ -0,0 +1,291 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns a list of all Amazon S3 directory buckets owned by the authenticated +// sender of the request. For more information about directory buckets, see +// Directory buckets (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html) +// in the Amazon S3 User Guide. Directory buckets - For directory buckets, you must +// make requests for this API operation to the Regional endpoint. These endpoints +// support path-style requests in the format +// https://s3express-control.region_code.amazonaws.com/bucket-name . +// Virtual-hosted-style requests aren't supported. For more information, see +// Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. 
Permissions You must have the +// s3express:ListAllMyDirectoryBuckets permission in an IAM identity-based policy +// instead of a bucket policy. Cross-account access to this API operation isn't +// supported. This operation can only be performed by the Amazon Web Services +// account that owns the resource. For more information about directory bucket +// policies and permissions, see Amazon Web Services Identity and Access +// Management (IAM) for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html) +// in the Amazon S3 User Guide. HTTP Host header syntax Directory buckets - The +// HTTP Host header syntax is s3express-control.region.amazonaws.com . +func (c *Client) ListDirectoryBuckets(ctx context.Context, params *ListDirectoryBucketsInput, optFns ...func(*Options)) (*ListDirectoryBucketsOutput, error) { + if params == nil { + params = &ListDirectoryBucketsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListDirectoryBuckets", params, optFns, c.addOperationListDirectoryBucketsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListDirectoryBucketsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListDirectoryBucketsInput struct { + + // ContinuationToken indicates to Amazon S3 that the list is being continued on + // this bucket with a token. ContinuationToken is obfuscated and is not a real + // key. You can use this ContinuationToken for pagination of the list results. + ContinuationToken *string + + // Maximum number of buckets to be returned in response. When the number is more + // than the count of buckets that are owned by an Amazon Web Services account, + // return all the buckets in response. + MaxDirectoryBuckets *int32 + + noSmithyDocumentSerde +} + +func (in *ListDirectoryBucketsInput) bindEndpointParams(p *EndpointParameters) { + + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + +type ListDirectoryBucketsOutput struct { + + // The list of buckets owned by the requester. + Buckets []types.Bucket + + // If ContinuationToken was sent with the request, it is included in the response. + // You can use the returned ContinuationToken for pagination of the list response. + ContinuationToken *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListDirectoryBucketsMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpListDirectoryBuckets{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpListDirectoryBuckets{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "ListDirectoryBuckets"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListDirectoryBuckets(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } + if err = addListDirectoryBucketsUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + return nil +} + +// ListDirectoryBucketsAPIClient is a client that implements the +// ListDirectoryBuckets operation. +type ListDirectoryBucketsAPIClient interface { + ListDirectoryBuckets(context.Context, *ListDirectoryBucketsInput, ...func(*Options)) (*ListDirectoryBucketsOutput, error) +} + +var _ ListDirectoryBucketsAPIClient = (*Client)(nil) + +// ListDirectoryBucketsPaginatorOptions is the paginator options for +// ListDirectoryBuckets +type ListDirectoryBucketsPaginatorOptions struct { + // Maximum number of buckets to be returned in response. 
When the number is more + // than the count of buckets that are owned by an Amazon Web Services account, + // return all the buckets in response. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. + StopOnDuplicateToken bool +} + +// ListDirectoryBucketsPaginator is a paginator for ListDirectoryBuckets +type ListDirectoryBucketsPaginator struct { + options ListDirectoryBucketsPaginatorOptions + client ListDirectoryBucketsAPIClient + params *ListDirectoryBucketsInput + nextToken *string + firstPage bool +} + +// NewListDirectoryBucketsPaginator returns a new ListDirectoryBucketsPaginator +func NewListDirectoryBucketsPaginator(client ListDirectoryBucketsAPIClient, params *ListDirectoryBucketsInput, optFns ...func(*ListDirectoryBucketsPaginatorOptions)) *ListDirectoryBucketsPaginator { + if params == nil { + params = &ListDirectoryBucketsInput{} + } + + options := ListDirectoryBucketsPaginatorOptions{} + if params.MaxDirectoryBuckets != nil { + options.Limit = *params.MaxDirectoryBuckets + } + + for _, fn := range optFns { + fn(&options) + } + + return &ListDirectoryBucketsPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.ContinuationToken, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *ListDirectoryBucketsPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) +} + +// NextPage retrieves the next ListDirectoryBuckets page. +func (p *ListDirectoryBucketsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListDirectoryBucketsOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.ContinuationToken = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.MaxDirectoryBuckets = limit + + result, err := p.client.ListDirectoryBuckets(ctx, &params, optFns...) 
+ if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.ContinuationToken + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +func newServiceMetadataMiddleware_opListDirectoryBuckets(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "ListDirectoryBuckets", + } +} + +func addListDirectoryBucketsUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: nopGetBucketAccessor, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/service/s3/api_op_ListMultipartUploads.go b/service/s3/api_op_ListMultipartUploads.go index 2cc53f65dca..3e6853e689b 100644 --- a/service/s3/api_op_ListMultipartUploads.go +++ b/service/s3/api_op_ListMultipartUploads.go @@ -13,24 +13,66 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This action lists in-progress multipart uploads. An in-progress multipart -// upload is a multipart upload that has been initiated using the Initiate -// Multipart Upload request, but has not yet been completed or aborted. This action -// returns at most 1,000 multipart uploads in the response. 1,000 multipart uploads -// is the maximum number of uploads a response can include, which is also the -// default value. You can further limit the number of uploads in a response by -// specifying the max-uploads parameter in the response. If additional multipart -// uploads satisfy the list criteria, the response will contain an IsTruncated -// element with the value true. To list the additional multipart uploads, use the -// key-marker and upload-id-marker request parameters. In the response, the -// uploads are sorted by key. If your application has initiated more than one -// multipart upload using the same object key, then uploads in the response are -// first sorted by key. Additionally, uploads are sorted in ascending order within -// each key by the upload initiation time. For more information on multipart -// uploads, see Uploading Objects Using Multipart Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html) -// . For information on permissions required to use the multipart upload API, see -// Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) -// . The following operations are related to ListMultipartUploads : +// This operation lists in-progress multipart uploads in a bucket. An in-progress +// multipart upload is a multipart upload that has been initiated by the +// CreateMultipartUpload request, but has not yet been completed or aborted. +// Directory buckets - If multipart uploads in a directory bucket are in progress, +// you can't delete the bucket until all the in-progress multipart uploads are +// aborted or completed. The ListMultipartUploads operation returns a maximum of +// 1,000 multipart uploads in the response. 
The limit of 1,000 multipart uploads is +// also the default value. You can further limit the number of uploads in a +// response by specifying the max-uploads request parameter. If there are more +// than 1,000 multipart uploads that satisfy your ListMultipartUploads request, +// the response returns an IsTruncated element with the value of true , a +// NextKeyMarker element, and a NextUploadIdMarker element. To list the remaining +// multipart uploads, you need to make subsequent ListMultipartUploads requests. +// In these requests, include two query parameters: key-marker and upload-id-marker +// . Set the value of key-marker to the NextKeyMarker value from the previous +// response. Similarly, set the value of upload-id-marker to the NextUploadIdMarker +// value from the previous response. Directory buckets - The upload-id-marker +// element and the NextUploadIdMarker element aren't supported by directory +// buckets. To list the additional multipart uploads, you only need to set the +// value of key-marker to the NextKeyMarker value from the previous response. For +// more information about multipart uploads, see Uploading Objects Using Multipart +// Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html) +// in the Amazon S3 User Guide. Directory buckets - For directory buckets, you must +// make requests for this API operation to the Zonal endpoint. These endpoints +// support virtual-hosted-style requests in the format +// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style +// requests are not supported. For more information, see Regional and Zonal +// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. Permissions +// - General purpose bucket permissions - For information about permissions +// required to use the multipart upload API, see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) +// in the Amazon S3 User Guide. +// - Directory bucket permissions - To grant access to this API operation on a +// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// API operation for session-based authorization. Specifically, you grant the +// s3express:CreateSession permission to the directory bucket in a bucket policy +// or an IAM identity-based policy. Then, you make the CreateSession API call on +// the bucket to obtain a session token. With the session token in your request +// header, you can make API requests to this operation. After the session token +// expires, you make another CreateSession API call to generate a new session +// token for use. Amazon Web Services CLI or SDKs create session and refresh the +// session token automatically to avoid service interruptions when a session +// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// . +// +// Sorting of multipart uploads in response +// - General purpose bucket - In the ListMultipartUploads response, the multipart +// uploads are sorted based on two criteria: +// - Key-based sorting - Multipart uploads are initially sorted in ascending +// order based on their object keys. +// - Time-based sorting - For uploads that share the same object key, they are +// further sorted in ascending order based on the upload initiation time. 
Among +// uploads with the same key, the one that was initiated first will appear before +// the ones that were initiated later. +// - Directory bucket - In the ListMultipartUploads response, the multipart +// uploads aren't sorted lexicographically based on the object keys. +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket_name.s3express-az_id.region.amazonaws.com . The following operations are +// related to ListMultipartUploads : // - CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) // - UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) // - CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) @@ -53,16 +95,26 @@ func (c *Client) ListMultipartUploads(ctx context.Context, params *ListMultipart type ListMultipartUploadsInput struct { - // The name of the bucket to which the multipart upload was initiated. When using - // this action with an access point, you must direct requests to the access point - // hostname. The access point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. When you use this action with Amazon S3 on - // Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on - // Outposts hostname takes the form + // The name of the bucket to which the multipart upload was initiated. Directory + // buckets - When you use this operation with a directory bucket, you must use + // virtual-hosted-style requests in the format + // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not + // supported. Directory bucket names must be unique in the chosen Availability + // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for + // example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 ). For information about bucket + // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. Access points - When you use this action with an + // access point, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When using the access point ARN, + // you must direct requests to the access point hostname. The access point hostname + // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. Access points and Object Lambda access points are + // not supported by directory buckets. S3 on Outposts - When you use this action + // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts + // hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . 
When you // use this action with S3 on Outposts through the Amazon Web Services SDKs, you // provide the Outposts access point ARN in place of the bucket name. For more @@ -77,7 +129,8 @@ type ListMultipartUploadsInput struct { // prefix are grouped under a single result element, CommonPrefixes . If you don't // specify the prefix parameter, then the substring starts at the beginning of the // key. The keys that are grouped under CommonPrefixes result element are not - // returned elsewhere in the response. + // returned elsewhere in the response. Directory buckets - For directory buckets, / + // is the only supported delimiter. Delimiter *string // Requests Amazon S3 to encode the object keys in the response and specifies the @@ -88,18 +141,26 @@ type ListMultipartUploadsInput struct { // response. EncodingType types.EncodingType - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string - // Together with upload-id-marker , this parameter specifies the multipart upload - // after which listing should begin. If upload-id-marker is not specified, only - // the keys lexicographically greater than the specified key-marker will be - // included in the list. If upload-id-marker is specified, any multipart uploads - // for a key equal to the key-marker might also be included, provided those - // multipart uploads have upload IDs lexicographically greater than the specified - // upload-id-marker . + // Specifies the multipart upload after which listing should begin. + // - General purpose buckets - For general purpose buckets, key-marker is an + // object key. Together with upload-id-marker , this parameter specifies the + // multipart upload after which listing should begin. If upload-id-marker is not + // specified, only the keys lexicographically greater than the specified + // key-marker will be included in the list. If upload-id-marker is specified, any + // multipart uploads for a key equal to the key-marker might also be included, + // provided those multipart uploads have upload IDs lexicographically greater than + // the specified upload-id-marker . + // - Directory buckets - For directory buckets, key-marker is obfuscated and + // isn't a real object key. The upload-id-marker parameter isn't supported by + // directory buckets. To list the additional multipart uploads, you only need to + // set the value of key-marker to the NextKeyMarker value from the previous + // response. In the ListMultipartUploads response, the multipart uploads aren't + // sorted lexicographically based on the object keys. KeyMarker *string // Sets the maximum number of multipart uploads, from 1 to 1,000, to return in the @@ -110,23 +171,26 @@ type ListMultipartUploadsInput struct { // Lists in-progress uploads only for those keys that begin with the specified // prefix. You can use prefixes to separate a bucket into different grouping of // keys. (You can think of using prefix to make groups in the same way that you'd - // use a folder in a file system.) + // use a folder in a file system.) Directory buckets - For directory buckets, only + // prefixes that end in a delimiter ( / ) are supported. 
Prefix *string // Confirms that the requester knows that they will be charged for the request. // Bucket owners need not specify this parameter in their requests. If either the - // source or destination Amazon S3 bucket has Requester Pays enabled, the requester - // will pay for corresponding charges to copy the object. For information about + // source or destination S3 bucket has Requester Pays enabled, the requester will + // pay for corresponding charges to copy the object. For information about // downloading objects from Requester Pays buckets, see Downloading Objects in // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. RequestPayer types.RequestPayer // Together with key-marker, specifies the multipart upload after which listing // should begin. If key-marker is not specified, the upload-id-marker parameter is // ignored. Otherwise, any multipart uploads for a key equal to the key-marker // might be included in the list only if they have an upload ID lexicographically - // greater than the specified upload-id-marker . + // greater than the specified upload-id-marker . This functionality is not + // supported for directory buckets. UploadIdMarker *string noSmithyDocumentSerde @@ -146,11 +210,14 @@ type ListMultipartUploadsOutput struct { // If you specify a delimiter in the request, then the result returns each // distinct key prefix containing the delimiter in a CommonPrefixes element. The - // distinct key prefixes are returned in the Prefix child element. + // distinct key prefixes are returned in the Prefix child element. Directory + // buckets - For directory buckets, only prefixes that end in a delimiter ( / ) are + // supported. CommonPrefixes []types.CommonPrefix // Contains the delimiter you specified in the request. If you don't specify a - // delimiter in your request, this element is absent from the response. + // delimiter in your request, this element is absent from the response. Directory + // buckets - For directory buckets, / is the only supported delimiter. Delimiter *string // Encoding type used by Amazon S3 to encode object keys in the response. If you @@ -177,18 +244,22 @@ type ListMultipartUploadsOutput struct { NextKeyMarker *string // When a list is truncated, this element specifies the value that should be used - // for the upload-id-marker request parameter in a subsequent request. + // for the upload-id-marker request parameter in a subsequent request. This + // functionality is not supported for directory buckets. NextUploadIdMarker *string // When a prefix is provided in the request, this field contains the specified // prefix. The result contains only keys starting with the specified prefix. + // Directory buckets - For directory buckets, only prefixes that end in a delimiter + // ( / ) are supported. Prefix *string // If present, indicates that the requester was successfully charged for the - // request. + // request. This functionality is not supported for directory buckets. RequestCharged types.RequestCharged - // Upload ID after which listing began. + // Upload ID after which listing began. This functionality is not supported for + // directory buckets. UploadIdMarker *string // Container for elements related to a particular multipart upload. 
A response can @@ -256,6 +327,9 @@ func (c *Client) addOperationListMultipartUploadsMiddlewares(stack *middleware.S if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = addOpListMultipartUploadsValidationMiddleware(stack); err != nil { return err } diff --git a/service/s3/api_op_ListObjectVersions.go b/service/s3/api_op_ListObjectVersions.go index 4080fcbdbdb..2a0cd10fa77 100644 --- a/service/s3/api_op_ListObjectVersions.go +++ b/service/s3/api_op_ListObjectVersions.go @@ -13,14 +13,15 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Returns metadata about all versions of the objects in a bucket. You can also -// use request parameters as selection criteria to return metadata about a subset -// of all the object versions. To use this operation, you must have permission to -// perform the s3:ListBucketVersions action. Be aware of the name difference. A -// 200 OK response can contain valid or invalid XML. Make sure to design your -// application to parse the contents of the response and handle it appropriately. -// To use this operation, you must have READ access to the bucket. The following -// operations are related to ListObjectVersions : +// This operation is not supported by directory buckets. Returns metadata about +// all versions of the objects in a bucket. You can also use request parameters as +// selection criteria to return metadata about a subset of all the object versions. +// To use this operation, you must have permission to perform the +// s3:ListBucketVersions action. Be aware of the name difference. A 200 OK +// response can contain valid or invalid XML. Make sure to design your application +// to parse the contents of the response and handle it appropriately. To use this +// operation, you must have READ access to the bucket. The following operations are +// related to ListObjectVersions : // - ListObjectsV2 (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html) // - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) // - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) @@ -62,9 +63,9 @@ type ListObjectVersionsInput struct { // response. EncodingType types.EncodingType - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string // Specifies the key to start with when listing objects in a bucket. @@ -90,11 +91,12 @@ type ListObjectVersionsInput struct { // Confirms that the requester knows that they will be charged for the request. // Bucket owners need not specify this parameter in their requests. If either the - // source or destination Amazon S3 bucket has Requester Pays enabled, the requester - // will pay for corresponding charges to copy the object. For information about + // source or destination S3 bucket has Requester Pays enabled, the requester will + // pay for corresponding charges to copy the object. 
For information about // downloading objects from Requester Pays buckets, see Downloading Objects in // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. RequestPayer types.RequestPayer // Specifies the object version you want to start listing from. @@ -162,7 +164,7 @@ type ListObjectVersionsOutput struct { Prefix *string // If present, indicates that the requester was successfully charged for the - // request. + // request. This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // Marks the last version of the key returned in a truncated response. @@ -232,6 +234,9 @@ func (c *Client) addOperationListObjectVersionsMiddlewares(stack *middleware.Sta if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = addOpListObjectVersionsValidationMiddleware(stack); err != nil { return err } diff --git a/service/s3/api_op_ListObjects.go b/service/s3/api_op_ListObjects.go index 822ce3e4662..f0732edc7d4 100644 --- a/service/s3/api_op_ListObjects.go +++ b/service/s3/api_op_ListObjects.go @@ -13,12 +13,12 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Returns some or all (up to 1,000) of the objects in a bucket. You can use the -// request parameters as selection criteria to return a subset of the objects in a -// bucket. A 200 OK response can contain valid or invalid XML. Be sure to design -// your application to parse the contents of the response and handle it -// appropriately. This action has been revised. We recommend that you use the newer -// version, ListObjectsV2 (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html) +// This operation is not supported by directory buckets. Returns some or all (up +// to 1,000) of the objects in a bucket. You can use the request parameters as +// selection criteria to return a subset of the objects in a bucket. A 200 OK +// response can contain valid or invalid XML. Be sure to design your application to +// parse the contents of the response and handle it appropriately. This action has +// been revised. We recommend that you use the newer version, ListObjectsV2 (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html) // , when developing applications. For backward compatibility, Amazon S3 continues // to support ListObjects . The following operations are related to ListObjects : // - ListObjectsV2 (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html) @@ -43,16 +43,26 @@ func (c *Client) ListObjects(ctx context.Context, params *ListObjectsInput, optF type ListObjectsInput struct { - // The name of the bucket containing the objects. When using this action with an - // access point, you must direct requests to the access point hostname. The access - // point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. 
When you use this action with Amazon S3 on - // Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on - // Outposts hostname takes the form + // The name of the bucket containing the objects. Directory buckets - When you use + // this operation with a directory bucket, you must use virtual-hosted-style + // requests in the format Bucket_name.s3express-az_id.region.amazonaws.com . + // Path-style requests are not supported. Directory bucket names must be unique in + // the chosen Availability Zone. Bucket names must follow the format + // bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 + // ). For information about bucket naming restrictions, see Directory bucket + // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. Access points - When you use this action with an + // access point, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When using the access point ARN, + // you must direct requests to the access point hostname. The access point hostname + // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. Access points and Object Lambda access points are + // not supported by directory buckets. S3 on Outposts - When you use this action + // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts + // hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you // use this action with S3 on Outposts through the Amazon Web Services SDKs, you // provide the Outposts access point ARN in place of the bucket name. For more @@ -73,9 +83,9 @@ type ListObjectsInput struct { // response. EncodingType types.EncodingType - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string // Marker is where you want Amazon S3 to start listing from. Amazon S3 starts @@ -162,7 +172,7 @@ type ListObjectsOutput struct { Prefix *string // If present, indicates that the requester was successfully charged for the - // request. + // request. This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // Metadata pertaining to the operation's result. 
@@ -226,6 +236,9 @@ func (c *Client) addOperationListObjectsMiddlewares(stack *middleware.Stack, opt if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = addOpListObjectsValidationMiddleware(stack); err != nil { return err } diff --git a/service/s3/api_op_ListObjectsV2.go b/service/s3/api_op_ListObjectsV2.go index 714edbfa4ae..b2f182dfb6d 100644 --- a/service/s3/api_op_ListObjectsV2.go +++ b/service/s3/api_op_ListObjectsV2.go @@ -17,21 +17,47 @@ import ( // You can use the request parameters as selection criteria to return a subset of // the objects in a bucket. A 200 OK response can contain valid or invalid XML. // Make sure to design your application to parse the contents of the response and -// handle it appropriately. Objects are returned sorted in an ascending order of -// the respective key names in the list. For more information about listing -// objects, see Listing object keys programmatically (https://docs.aws.amazon.com/AmazonS3/latest/userguide/ListingKeysUsingAPIs.html) -// in the Amazon S3 User Guide. To use this operation, you must have READ access to -// the bucket. To use this action in an Identity and Access Management (IAM) -// policy, you must have permission to perform the s3:ListBucket action. The -// bucket owner has this permission by default and can grant this permission to -// others. For more information about permissions, see Permissions Related to -// Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// in the Amazon S3 User Guide. This section describes the latest revision of this -// action. We recommend that you use this revised API operation for application -// development. For backward compatibility, Amazon S3 continues to support the -// prior version of this API operation, ListObjects (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html) -// . To get a list of your buckets, see ListBuckets (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html) +// handle it appropriately. For more information about listing objects, see +// Listing object keys programmatically (https://docs.aws.amazon.com/AmazonS3/latest/userguide/ListingKeysUsingAPIs.html) +// in the Amazon S3 User Guide. To get a list of your buckets, see ListBuckets (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html) +// . Directory buckets - For directory buckets, you must make requests for this API +// operation to the Zonal endpoint. These endpoints support virtual-hosted-style +// requests in the format +// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style +// requests are not supported. For more information, see Regional and Zonal +// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. Permissions +// - General purpose bucket permissions - To use this operation, you must have +// READ access to the bucket. You must have permission to perform the +// s3:ListBucket action. The bucket owner has this permission by default and can +// grant this permission to others. 
For more information about permissions, see +// Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// in the Amazon S3 User Guide. +// - Directory bucket permissions - To grant access to this API operation on a +// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// API operation for session-based authorization. Specifically, you grant the +// s3express:CreateSession permission to the directory bucket in a bucket policy +// or an IAM identity-based policy. Then, you make the CreateSession API call on +// the bucket to obtain a session token. With the session token in your request +// header, you can make API requests to this operation. After the session token +// expires, you make another CreateSession API call to generate a new session +// token for use. Amazon Web Services CLI or SDKs create session and refresh the +// session token automatically to avoid service interruptions when a session +// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// . +// +// Sorting order of returned objects +// - General purpose bucket - For general purpose buckets, ListObjectsV2 returns +// objects in lexicographical order based on their key names. +// - Directory bucket - For directory buckets, ListObjectsV2 does not return +// objects in lexicographical order. +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket_name.s3express-az_id.region.amazonaws.com . This section describes the +// latest revision of this action. We recommend that you use this revised API +// operation for application development. For backward compatibility, Amazon S3 +// continues to support the prior version of this API operation, ListObjects (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html) // . The following operations are related to ListObjectsV2 : // - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) // - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) @@ -53,15 +79,25 @@ func (c *Client) ListObjectsV2(ctx context.Context, params *ListObjectsV2Input, type ListObjectsV2Input struct { - // Bucket name to list. When using this action with an access point, you must - // direct requests to the access point hostname. The access point hostname takes - // the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When - // using this action with an access point through the Amazon Web Services SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. When you use this action with Amazon S3 on - // Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on - // Outposts hostname takes the form + // Directory buckets - When you use this operation with a directory bucket, you + // must use virtual-hosted-style requests in the format + // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not + // supported. 
Directory bucket names must be unique in the chosen Availability + // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for + // example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 ). For information about bucket + // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. Access points - When you use this action with an + // access point, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When using the access point ARN, + // you must direct requests to the access point hostname. The access point hostname + // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. Access points and Object Lambda access points are + // not supported by directory buckets. S3 on Outposts - When you use this action + // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts + // hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you // use this action with S3 on Outposts through the Amazon Web Services SDKs, you // provide the Outposts access point ARN in place of the bucket name. For more @@ -72,23 +108,32 @@ type ListObjectsV2Input struct { Bucket *string // ContinuationToken indicates to Amazon S3 that the list is being continued on - // this bucket with a token. ContinuationToken is obfuscated and is not a real key. + // this bucket with a token. ContinuationToken is obfuscated and is not a real + // key. You can use this ContinuationToken for pagination of the list results. ContinuationToken *string // A delimiter is a character that you use to group keys. + // - Directory buckets - For directory buckets, / is the only supported + // delimiter. + // - Directory buckets - When you query ListObjectsV2 with a delimiter during + // in-progress multipart uploads, the CommonPrefixes response parameter contains + // the prefixes that are associated with the in-progress multipart uploads. For + // more information about multipart uploads, see Multipart Upload Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html) + // in the Amazon S3 User Guide. Delimiter *string // Encoding type used by Amazon S3 to encode object keys in the response. EncodingType types.EncodingType - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string // The owner field is not present in ListObjectsV2 by default. If you want to // return the owner field with each key in the result, then set the FetchOwner - // field to true . + // field to true . Directory buckets - For directory buckets, the bucket owner is + // returned as the object owner for all objects. 
FetchOwner *bool // Sets the maximum number of keys returned in the response. By default, the @@ -97,19 +142,23 @@ type ListObjectsV2Input struct { MaxKeys *int32 // Specifies the optional fields that you want returned in the response. Fields - // that you do not specify are not returned. + // that you do not specify are not returned. This functionality is not supported + // for directory buckets. OptionalObjectAttributes []types.OptionalObjectAttributes - // Limits the response to keys that begin with the specified prefix. + // Limits the response to keys that begin with the specified prefix. Directory + // buckets - For directory buckets, only prefixes that end in a delimiter ( / ) are + // supported. Prefix *string // Confirms that the requester knows that she or he will be charged for the list // objects request in V2 style. Bucket owners need not specify this parameter in - // their requests. + // their requests. This functionality is not supported for directory buckets. RequestPayer types.RequestPayer // StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts - // listing after this specified key. StartAfter can be any key in the bucket. + // listing after this specified key. StartAfter can be any key in the bucket. This + // functionality is not supported for directory buckets. StartAfter *string noSmithyDocumentSerde @@ -123,28 +172,39 @@ func (in *ListObjectsV2Input) bindEndpointParams(p *EndpointParameters) { type ListObjectsV2Output struct { - // All of the keys (up to 1,000) rolled up into a common prefix count as a single - // return when calculating the number of returns. A response can contain - // CommonPrefixes only if you specify a delimiter. CommonPrefixes contains all (if - // there are any) keys between Prefix and the next occurrence of the string - // specified by a delimiter. CommonPrefixes lists keys that act like - // subdirectories in the directory specified by Prefix . For example, if the prefix - // is notes/ and the delimiter is a slash ( / ) as in notes/summer/july , the - // common prefix is notes/summer/ . All of the keys that roll up into a common - // prefix count as a single return when calculating the number of returns. + // All of the keys (up to 1,000) that share the same prefix are grouped together. + // When counting the total numbers of returns by this API operation, this group of + // keys is considered as one item. A response can contain CommonPrefixes only if + // you specify a delimiter. CommonPrefixes contains all (if there are any) keys + // between Prefix and the next occurrence of the string specified by a delimiter. + // CommonPrefixes lists keys that act like subdirectories in the directory + // specified by Prefix . For example, if the prefix is notes/ and the delimiter is + // a slash ( / ) as in notes/summer/july , the common prefix is notes/summer/ . All + // of the keys that roll up into a common prefix count as a single return when + // calculating the number of returns. + // - Directory buckets - For directory buckets, only prefixes that end in a + // delimiter ( / ) are supported. + // - Directory buckets - When you query ListObjectsV2 with a delimiter during + // in-progress multipart uploads, the CommonPrefixes response parameter contains + // the prefixes that are associated with the in-progress multipart uploads. For + // more information about multipart uploads, see Multipart Upload Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html) + // in the Amazon S3 User Guide. 
CommonPrefixes []types.CommonPrefix // Metadata about each object returned. Contents []types.Object // If ContinuationToken was sent with the request, it is included in the response. + // You can use the returned ContinuationToken for pagination of the list response. + // You can use this ContinuationToken for pagination of the list results. ContinuationToken *string // Causes keys that contain the same string between the prefix and the first // occurrence of the delimiter to be rolled up into a single result element in the // CommonPrefixes collection. These rolled-up keys are not returned elsewhere in // the response. Each rolled-up result counts as only one return against the - // MaxKeys value. + // MaxKeys value. Directory buckets - For directory buckets, / is the only + // supported delimiter. Delimiter *string // Encoding type used by Amazon S3 to encode object key names in the XML response. @@ -168,20 +228,7 @@ type ListObjectsV2Output struct { // will never contain more. MaxKeys *int32 - // The bucket name. When using this action with an access point, you must direct - // requests to the access point hostname. The access point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. When you use this action with Amazon S3 on - // Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on - // Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you - // use this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // The bucket name. Name *string // NextContinuationToken is sent when isTruncated is true, which means there are @@ -190,14 +237,16 @@ type ListObjectsV2Output struct { // obfuscated and is not a real key NextContinuationToken *string - // Keys that begin with the indicated prefix. + // Keys that begin with the indicated prefix. Directory buckets - For directory + // buckets, only prefixes that end in a delimiter ( / ) are supported. Prefix *string // If present, indicates that the requester was successfully charged for the - // request. + // request. This functionality is not supported for directory buckets. RequestCharged types.RequestCharged - // If StartAfter was sent with the request, it is included in the response. + // If StartAfter was sent with the request, it is included in the response. This + // functionality is not supported for directory buckets. StartAfter *string // Metadata pertaining to the operation's result. 
@@ -261,6 +310,9 @@ func (c *Client) addOperationListObjectsV2Middlewares(stack *middleware.Stack, o if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = addOpListObjectsV2ValidationMiddleware(stack); err != nil { return err } diff --git a/service/s3/api_op_ListParts.go b/service/s3/api_op_ListParts.go index ff3aab3df60..11b0d59b6d7 100644 --- a/service/s3/api_op_ListParts.go +++ b/service/s3/api_op_ListParts.go @@ -14,22 +14,49 @@ import ( "time" ) -// Lists the parts that have been uploaded for a specific multipart upload. This -// operation must include the upload ID, which you obtain by sending the initiate -// multipart upload request (see CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) -// ). This request returns a maximum of 1,000 uploaded parts. The default number of -// parts returned is 1,000 parts. You can restrict the number of parts returned by -// specifying the max-parts request parameter. If your multipart upload consists -// of more than 1,000 parts, the response returns an IsTruncated field with the -// value of true, and a NextPartNumberMarker element. In subsequent ListParts -// requests you can include the part-number-marker query string parameter and set -// its value to the NextPartNumberMarker field value from the previous response. -// If the upload was created using a checksum algorithm, you will need to have -// permission to the kms:Decrypt action for the request to succeed. For more +// Lists the parts that have been uploaded for a specific multipart upload. To use +// this operation, you must provide the upload ID in the request. You obtain this +// uploadID by sending the initiate multipart upload request through +// CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) +// . The ListParts request returns a maximum of 1,000 uploaded parts. The limit of +// 1,000 parts is also the default value. You can restrict the number of parts in a +// response by specifying the max-parts request parameter. If your multipart +// upload consists of more than 1,000 parts, the response returns an IsTruncated +// field with the value of true , and a NextPartNumberMarker element. To list +// remaining uploaded parts, in subsequent ListParts requests, include the +// part-number-marker query string parameter and set its value to the +// NextPartNumberMarker field value from the previous response. For more // information on multipart uploads, see Uploading Objects Using Multipart Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html) -// . For information on permissions required to use the multipart upload API, see -// Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) -// . The following operations are related to ListParts : +// in the Amazon S3 User Guide. Directory buckets - For directory buckets, you must +// make requests for this API operation to the Zonal endpoint. These endpoints +// support virtual-hosted-style requests in the format +// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style +// requests are not supported. For more information, see Regional and Zonal +// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. 
Permissions +// - General purpose bucket permissions - For information about permissions +// required to use the multipart upload API, see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) +// in the Amazon S3 User Guide. If the upload was created using server-side +// encryption with Key Management Service (KMS) keys (SSE-KMS) or dual-layer +// server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), you must +// have permission to the kms:Decrypt action for the ListParts request to +// succeed. +// - Directory bucket permissions - To grant access to this API operation on a +// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// API operation for session-based authorization. Specifically, you grant the +// s3express:CreateSession permission to the directory bucket in a bucket policy +// or an IAM identity-based policy. Then, you make the CreateSession API call on +// the bucket to obtain a session token. With the session token in your request +// header, you can make API requests to this operation. After the session token +// expires, you make another CreateSession API call to generate a new session +// token for use. Amazon Web Services CLI or SDKs create session and refresh the +// session token automatically to avoid service interruptions when a session +// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// . +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket_name.s3express-az_id.region.amazonaws.com . The following operations are +// related to ListParts : // - CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) // - UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) // - CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) @@ -53,16 +80,26 @@ func (c *Client) ListParts(ctx context.Context, params *ListPartsInput, optFns . type ListPartsInput struct { - // The name of the bucket to which the parts are being uploaded. When using this - // action with an access point, you must direct requests to the access point - // hostname. The access point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. When you use this action with Amazon S3 on - // Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on - // Outposts hostname takes the form + // The name of the bucket to which the parts are being uploaded. Directory buckets + // - When you use this operation with a directory bucket, you must use + // virtual-hosted-style requests in the format + // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not + // supported. Directory bucket names must be unique in the chosen Availability + // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for + // example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 ). 
For information about bucket + // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. Access points - When you use this action with an + // access point, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When using the access point ARN, + // you must direct requests to the access point hostname. The access point hostname + // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. Access points and Object Lambda access points are + // not supported by directory buckets. S3 on Outposts - When you use this action + // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts + // hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you // use this action with S3 on Outposts through the Amazon Web Services SDKs, you // provide the Outposts access point ARN in place of the bucket name. For more @@ -82,9 +119,9 @@ type ListPartsInput struct { // This member is required. UploadId *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string // Sets the maximum number of parts to return. @@ -96,29 +133,33 @@ type ListPartsInput struct { // Confirms that the requester knows that they will be charged for the request. // Bucket owners need not specify this parameter in their requests. If either the - // source or destination Amazon S3 bucket has Requester Pays enabled, the requester - // will pay for corresponding charges to copy the object. For information about + // source or destination S3 bucket has Requester Pays enabled, the requester will + // pay for corresponding charges to copy the object. For information about // downloading objects from Requester Pays buckets, see Downloading Objects in // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. RequestPayer types.RequestPayer // The server-side encryption (SSE) algorithm used to encrypt the object. This // parameter is needed only when the object was created using a checksum algorithm. // For more information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) - // in the Amazon S3 User Guide. + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. SSECustomerAlgorithm *string // The server-side encryption (SSE) customer managed key. This parameter is needed // only when the object was created using a checksum algorithm. 
For more // information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) - // in the Amazon S3 User Guide. + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. SSECustomerKey *string // The MD5 server-side encryption (SSE) customer managed key. This parameter is // needed only when the object was created using a checksum algorithm. For more // information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) - // in the Amazon S3 User Guide. + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. SSECustomerKeyMD5 *string noSmithyDocumentSerde @@ -140,11 +181,13 @@ type ListPartsOutput struct { // Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) // . The response will also include the x-amz-abort-rule-id header that will // provide the ID of the lifecycle configuration rule that defines this action. + // This functionality is not supported for directory buckets. AbortDate *time.Time // This header is returned along with the x-amz-abort-date header. It identifies // applicable lifecycle configuration rule that defines the action to abort - // incomplete multipart uploads. + // incomplete multipart uploads. This functionality is not supported for directory + // buckets. AbortRuleId *string // The name of the bucket to which the multipart upload was initiated. Does not @@ -178,7 +221,8 @@ type ListPartsOutput struct { // Container element that identifies the object owner, after the object is // created. If multipart upload is initiated by an IAM user, this element provides - // the parent account ID and display name. + // the parent account ID and display name. Directory buckets - The bucket owner is + // returned as the object owner for all the parts. Owner *types.Owner // When a list is truncated, this element specifies the last part in the list, as @@ -191,11 +235,12 @@ type ListPartsOutput struct { Parts []types.Part // If present, indicates that the requester was successfully charged for the - // request. + // request. This functionality is not supported for directory buckets. RequestCharged types.RequestCharged - // Class of storage (STANDARD or REDUCED_REDUNDANCY) used to store the uploaded - // object. + // The class of storage used to store the uploaded object. Directory buckets - + // Only the S3 Express One Zone storage class is supported by directory buckets to + // store objects. StorageClass types.StorageClass // Upload ID identifying the multipart upload whose parts are being listed. 
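As a rough illustration of the part-number-marker pagination described in the ListParts documentation above, the sketch below lists all uploaded parts for one multipart upload with the generated ListParts paginator, which forwards NextPartNumberMarker as the next request's PartNumberMarker. The bucket, key, and upload ID are placeholders, and the client setup mirrors the earlier ListObjectsV2 sketch.

package main

import (
    "context"
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/config"
    "github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
    cfg, err := config.LoadDefaultConfig(context.TODO())
    if err != nil {
        log.Fatal(err)
    }
    client := s3.NewFromConfig(cfg)

    // Each page holds at most MaxParts entries (1,000 is both the cap and the
    // default); the paginator keeps requesting pages until the listing is no
    // longer truncated.
    p := s3.NewListPartsPaginator(client, &s3.ListPartsInput{
        Bucket:   aws.String("DOC-EXAMPLE-BUCKET"), // placeholder bucket
        Key:      aws.String("large-object.bin"),   // placeholder key
        UploadId: aws.String("example-upload-id"),  // placeholder ID from CreateMultipartUpload
        MaxParts: aws.Int32(1000),
    })
    total := 0
    for p.HasMorePages() {
        page, err := p.NextPage(context.TODO())
        if err != nil {
            log.Fatal(err)
        }
        total += len(page.Parts)
        for _, part := range page.Parts {
            fmt.Println("part ETag:", aws.ToString(part.ETag))
        }
    }
    fmt.Println("parts listed:", total)
}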
@@ -262,6 +307,9 @@ func (c *Client) addOperationListPartsMiddlewares(stack *middleware.Stack, optio if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = addOpListPartsValidationMiddleware(stack); err != nil { return err } diff --git a/service/s3/api_op_PutBucketAccelerateConfiguration.go b/service/s3/api_op_PutBucketAccelerateConfiguration.go index 498c51b64da..80344efb1ca 100644 --- a/service/s3/api_op_PutBucketAccelerateConfiguration.go +++ b/service/s3/api_op_PutBucketAccelerateConfiguration.go @@ -11,16 +11,18 @@ import ( s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Sets the accelerate configuration of an existing bucket. Amazon S3 Transfer -// Acceleration is a bucket-level feature that enables you to perform faster data -// transfers to Amazon S3. To use this operation, you must have permission to -// perform the s3:PutAccelerateConfiguration action. The bucket owner has this -// permission by default. The bucket owner can grant this permission to others. For -// more information about permissions, see Permissions Related to Bucket -// Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// This operation is not supported by directory buckets. Sets the accelerate +// configuration of an existing bucket. Amazon S3 Transfer Acceleration is a +// bucket-level feature that enables you to perform faster data transfers to Amazon +// S3. To use this operation, you must have permission to perform the +// s3:PutAccelerateConfiguration action. The bucket owner has this permission by +// default. The bucket owner can grant this permission to others. For more +// information about permissions, see Permissions Related to Bucket Subresource +// Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) // and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) // . The Transfer Acceleration state of a bucket can be set to one of the following // two values: @@ -64,19 +66,19 @@ type PutBucketAccelerateConfigurationInput struct { // This member is required. Bucket *string - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not using - // the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the - // HTTP status code 400 Bad Request . For more information, see Checking object - // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Indicates the algorithm used to create the checksum for the object when you use + // the SDK. This header will not provide any additional functionality if you don't + // use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request . 
For more information, see + // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 // ignores any provided ChecksumAlgorithm parameter. ChecksumAlgorithm types.ChecksumAlgorithm - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string noSmithyDocumentSerde @@ -84,7 +86,7 @@ type PutBucketAccelerateConfigurationInput struct { func (in *PutBucketAccelerateConfigurationInput) bindEndpointParams(p *EndpointParameters) { p.Bucket = in.Bucket - + p.UseS3ExpressControlEndpoint = ptr.Bool(true) } type PutBucketAccelerateConfigurationOutput struct { @@ -149,6 +151,9 @@ func (c *Client) addOperationPutBucketAccelerateConfigurationMiddlewares(stack * if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = addOpPutBucketAccelerateConfigurationValidationMiddleware(stack); err != nil { return err } diff --git a/service/s3/api_op_PutBucketAcl.go b/service/s3/api_op_PutBucketAcl.go index 2174bc569c5..6382d27691d 100644 --- a/service/s3/api_op_PutBucketAcl.go +++ b/service/s3/api_op_PutBucketAcl.go @@ -11,13 +11,15 @@ import ( s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Sets the permissions on an existing bucket using access control lists (ACL). -// For more information, see Using ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html) -// . To set the ACL of a bucket, you must have WRITE_ACP permission. You can use -// one of the following two ways to set a bucket's permissions: +// This operation is not supported by directory buckets. Sets the permissions on +// an existing bucket using access control lists (ACL). For more information, see +// Using ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html) +// . To set the ACL of a bucket, you must have the WRITE_ACP permission. You can +// use one of the following two ways to set a bucket's permissions: // - Specify the ACL in the request body // - Specify permissions using request headers // @@ -124,12 +126,12 @@ type PutBucketAclInput struct { // Contains the elements that set the ACL permissions for an object per grantee. AccessControlPolicy *types.AccessControlPolicy - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not using - // the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the - // HTTP status code 400 Bad Request . For more information, see Checking object - // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Indicates the algorithm used to create the checksum for the object when you use + // the SDK. 
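The PutBucketAcl documentation above lists two mutually exclusive ways to set bucket permissions: an ACL in the request body or permission grants in request headers. The minimal sketch below takes the header route with a canned ACL (the ACL member of PutBucketAclInput, sent as the x-amz-acl header); the bucket name is a placeholder, and the body route would instead populate AccessControlPolicy with explicit grants.

package main

import (
    "context"
    "log"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/config"
    "github.com/aws/aws-sdk-go-v2/service/s3"
    "github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
    cfg, err := config.LoadDefaultConfig(context.TODO())
    if err != nil {
        log.Fatal(err)
    }
    client := s3.NewFromConfig(cfg)

    // Canned ACL via the x-amz-acl header; the caller needs WRITE_ACP on the bucket.
    _, err = client.PutBucketAcl(context.TODO(), &s3.PutBucketAclInput{
        Bucket: aws.String("DOC-EXAMPLE-BUCKET"), // placeholder bucket
        ACL:    types.BucketCannedACLPrivate,
    })
    if err != nil {
        log.Fatal(err)
    }
}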
This header will not provide any additional functionality if you don't + // use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request . For more information, see + // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 // ignores any provided ChecksumAlgorithm parameter. ChecksumAlgorithm types.ChecksumAlgorithm @@ -141,9 +143,9 @@ type PutBucketAclInput struct { // Amazon Web Services SDKs, this field is calculated automatically. ContentMD5 *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string // Allows grantee the read, write, read ACP, and write ACP permissions on the @@ -169,7 +171,7 @@ type PutBucketAclInput struct { func (in *PutBucketAclInput) bindEndpointParams(p *EndpointParameters) { p.Bucket = in.Bucket - + p.UseS3ExpressControlEndpoint = ptr.Bool(true) } type PutBucketAclOutput struct { @@ -234,6 +236,9 @@ func (c *Client) addOperationPutBucketAclMiddlewares(stack *middleware.Stack, op if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = addOpPutBucketAclValidationMiddleware(stack); err != nil { return err } @@ -270,6 +275,9 @@ func (c *Client) addOperationPutBucketAclMiddlewares(stack *middleware.Stack, op if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { + return err + } return nil } diff --git a/service/s3/api_op_PutBucketAnalyticsConfiguration.go b/service/s3/api_op_PutBucketAnalyticsConfiguration.go index 9dd28ec8bef..9b1009f6515 100644 --- a/service/s3/api_op_PutBucketAnalyticsConfiguration.go +++ b/service/s3/api_op_PutBucketAnalyticsConfiguration.go @@ -10,20 +10,21 @@ import ( s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Sets an analytics configuration for the bucket (specified by the analytics -// configuration ID). You can have up to 1,000 analytics configurations per bucket. -// You can choose to have storage class analysis export analysis reports sent to a -// comma-separated values (CSV) flat file. See the DataExport request element. -// Reports are updated daily and are based on the object filters that you -// configure. When selecting data export, you specify a destination bucket and an -// optional destination prefix where the file is written. You can export the data -// to a destination bucket in a different account. However, the destination bucket -// must be in the same Region as the bucket that you are making the PUT analytics -// configuration to. 
For more information, see Amazon S3 Analytics – Storage Class -// Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html) +// This operation is not supported by directory buckets. Sets an analytics +// configuration for the bucket (specified by the analytics configuration ID). You +// can have up to 1,000 analytics configurations per bucket. You can choose to have +// storage class analysis export analysis reports sent to a comma-separated values +// (CSV) flat file. See the DataExport request element. Reports are updated daily +// and are based on the object filters that you configure. When selecting data +// export, you specify a destination bucket and an optional destination prefix +// where the file is written. You can export the data to a destination bucket in a +// different account. However, the destination bucket must be in the same Region as +// the bucket that you are making the PUT analytics configuration to. For more +// information, see Amazon S3 Analytics – Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html) // . You must create a bucket policy on the destination bucket where the exported // file is written to grant permissions to Amazon S3 to write objects to the // bucket. For an example policy, see Granting Permissions for Amazon S3 Inventory @@ -84,9 +85,9 @@ type PutBucketAnalyticsConfigurationInput struct { // This member is required. Id *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string noSmithyDocumentSerde @@ -94,7 +95,7 @@ type PutBucketAnalyticsConfigurationInput struct { func (in *PutBucketAnalyticsConfigurationInput) bindEndpointParams(p *EndpointParameters) { p.Bucket = in.Bucket - + p.UseS3ExpressControlEndpoint = ptr.Bool(true) } type PutBucketAnalyticsConfigurationOutput struct { @@ -159,6 +160,9 @@ func (c *Client) addOperationPutBucketAnalyticsConfigurationMiddlewares(stack *m if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = addOpPutBucketAnalyticsConfigurationValidationMiddleware(stack); err != nil { return err } diff --git a/service/s3/api_op_PutBucketCors.go b/service/s3/api_op_PutBucketCors.go index a5bd71b486f..394a2bad8bf 100644 --- a/service/s3/api_op_PutBucketCors.go +++ b/service/s3/api_op_PutBucketCors.go @@ -11,21 +11,23 @@ import ( s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Sets the cors configuration for your bucket. If the configuration exists, -// Amazon S3 replaces it. To use this operation, you must be allowed to perform the -// s3:PutBucketCORS action. By default, the bucket owner has this permission and -// can grant it to others. You set this configuration on a bucket so that the -// bucket can service cross-origin requests. 
For example, you might want to enable -// a request whose origin is http://www.example.com to access your Amazon S3 -// bucket at my.example.bucket.com by using the browser's XMLHttpRequest -// capability. To enable cross-origin resource sharing (CORS) on a bucket, you add -// the cors subresource to the bucket. The cors subresource is an XML document in -// which you configure rules that identify origins and the HTTP methods that can be -// executed on your bucket. The document is limited to 64 KB in size. When Amazon -// S3 receives a cross-origin request (or a pre-flight OPTIONS request) against a +// This operation is not supported by directory buckets. Sets the cors +// configuration for your bucket. If the configuration exists, Amazon S3 replaces +// it. To use this operation, you must be allowed to perform the s3:PutBucketCORS +// action. By default, the bucket owner has this permission and can grant it to +// others. You set this configuration on a bucket so that the bucket can service +// cross-origin requests. For example, you might want to enable a request whose +// origin is http://www.example.com to access your Amazon S3 bucket at +// my.example.bucket.com by using the browser's XMLHttpRequest capability. To +// enable cross-origin resource sharing (CORS) on a bucket, you add the cors +// subresource to the bucket. The cors subresource is an XML document in which you +// configure rules that identify origins and the HTTP methods that can be executed +// on your bucket. The document is limited to 64 KB in size. When Amazon S3 +// receives a cross-origin request (or a pre-flight OPTIONS request) against a // bucket, it evaluates the cors configuration on the bucket and uses the first // CORSRule rule that matches the incoming browser request to enable a cross-origin // request. For a rule to match, the following conditions must be met: @@ -71,12 +73,12 @@ type PutBucketCorsInput struct { // This member is required. CORSConfiguration *types.CORSConfiguration - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not using - // the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the - // HTTP status code 400 Bad Request . For more information, see Checking object - // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Indicates the algorithm used to create the checksum for the object when you use + // the SDK. This header will not provide any additional functionality if you don't + // use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request . For more information, see + // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 // ignores any provided ChecksumAlgorithm parameter. ChecksumAlgorithm types.ChecksumAlgorithm @@ -88,9 +90,9 @@ type PutBucketCorsInput struct { // Amazon Web Services SDKs, this field is calculated automatically. ContentMD5 *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). 
+ // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string noSmithyDocumentSerde @@ -98,7 +100,7 @@ type PutBucketCorsInput struct { func (in *PutBucketCorsInput) bindEndpointParams(p *EndpointParameters) { p.Bucket = in.Bucket - + p.UseS3ExpressControlEndpoint = ptr.Bool(true) } type PutBucketCorsOutput struct { @@ -163,6 +165,9 @@ func (c *Client) addOperationPutBucketCorsMiddlewares(stack *middleware.Stack, o if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = addOpPutBucketCorsValidationMiddleware(stack); err != nil { return err } @@ -199,6 +204,9 @@ func (c *Client) addOperationPutBucketCorsMiddlewares(stack *middleware.Stack, o if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { + return err + } return nil } diff --git a/service/s3/api_op_PutBucketEncryption.go b/service/s3/api_op_PutBucketEncryption.go index 7f060b97dbf..615e98a3d6c 100644 --- a/service/s3/api_op_PutBucketEncryption.go +++ b/service/s3/api_op_PutBucketEncryption.go @@ -11,17 +11,19 @@ import ( s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This action uses the encryption subresource to configure default encryption and -// Amazon S3 Bucket Keys for an existing bucket. By default, all buckets have a -// default encryption configuration that uses server-side encryption with Amazon S3 -// managed keys (SSE-S3). You can optionally configure default encryption for a -// bucket by using server-side encryption with Key Management Service (KMS) keys -// (SSE-KMS) or dual-layer server-side encryption with Amazon Web Services KMS keys -// (DSSE-KMS). If you specify default encryption by using SSE-KMS, you can also -// configure Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) +// This operation is not supported by directory buckets. This action uses the +// encryption subresource to configure default encryption and Amazon S3 Bucket Keys +// for an existing bucket. By default, all buckets have a default encryption +// configuration that uses server-side encryption with Amazon S3 managed keys +// (SSE-S3). You can optionally configure default encryption for a bucket by using +// server-side encryption with Key Management Service (KMS) keys (SSE-KMS) or +// dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS). +// If you specify default encryption by using SSE-KMS, you can also configure +// Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) // . If you use PutBucketEncryption to set your default bucket encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) // to SSE-KMS, you should verify that your KMS key ID is correct. Amazon S3 does // not validate the KMS key ID provided in PutBucketEncryption requests. This @@ -71,12 +73,12 @@ type PutBucketEncryptionInput struct { // This member is required. 
ServerSideEncryptionConfiguration *types.ServerSideEncryptionConfiguration - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not using - // the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the - // HTTP status code 400 Bad Request . For more information, see Checking object - // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Indicates the algorithm used to create the checksum for the object when you use + // the SDK. This header will not provide any additional functionality if you don't + // use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request . For more information, see + // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 // ignores any provided ChecksumAlgorithm parameter. ChecksumAlgorithm types.ChecksumAlgorithm @@ -87,9 +89,9 @@ type PutBucketEncryptionInput struct { // automatically. ContentMD5 *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string noSmithyDocumentSerde @@ -97,7 +99,7 @@ type PutBucketEncryptionInput struct { func (in *PutBucketEncryptionInput) bindEndpointParams(p *EndpointParameters) { p.Bucket = in.Bucket - + p.UseS3ExpressControlEndpoint = ptr.Bool(true) } type PutBucketEncryptionOutput struct { @@ -162,6 +164,9 @@ func (c *Client) addOperationPutBucketEncryptionMiddlewares(stack *middleware.St if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = addOpPutBucketEncryptionValidationMiddleware(stack); err != nil { return err } @@ -198,6 +203,9 @@ func (c *Client) addOperationPutBucketEncryptionMiddlewares(stack *middleware.St if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { + return err + } return nil } diff --git a/service/s3/api_op_PutBucketIntelligentTieringConfiguration.go b/service/s3/api_op_PutBucketIntelligentTieringConfiguration.go index bd8ed472c45..a5f8fc7ad44 100644 --- a/service/s3/api_op_PutBucketIntelligentTieringConfiguration.go +++ b/service/s3/api_op_PutBucketIntelligentTieringConfiguration.go @@ -10,11 +10,13 @@ import ( s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Puts a S3 Intelligent-Tiering configuration to the specified bucket. You can -// have up to 1,000 S3 Intelligent-Tiering configurations per bucket. 
The S3 +// This operation is not supported by directory buckets. Puts a S3 +// Intelligent-Tiering configuration to the specified bucket. You can have up to +// 1,000 S3 Intelligent-Tiering configurations per bucket. The S3 // Intelligent-Tiering storage class is designed to optimize storage costs by // automatically moving data to the most cost-effective storage access tier, // without performance impact or operational overhead. S3 Intelligent-Tiering @@ -81,7 +83,7 @@ type PutBucketIntelligentTieringConfigurationInput struct { func (in *PutBucketIntelligentTieringConfigurationInput) bindEndpointParams(p *EndpointParameters) { p.Bucket = in.Bucket - + p.UseS3ExpressControlEndpoint = ptr.Bool(true) } type PutBucketIntelligentTieringConfigurationOutput struct { @@ -146,6 +148,9 @@ func (c *Client) addOperationPutBucketIntelligentTieringConfigurationMiddlewares if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = addOpPutBucketIntelligentTieringConfigurationValidationMiddleware(stack); err != nil { return err } diff --git a/service/s3/api_op_PutBucketInventoryConfiguration.go b/service/s3/api_op_PutBucketInventoryConfiguration.go index 314eaa4561e..e2d06796a48 100644 --- a/service/s3/api_op_PutBucketInventoryConfiguration.go +++ b/service/s3/api_op_PutBucketInventoryConfiguration.go @@ -10,22 +10,23 @@ import ( s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This implementation of the PUT action adds an inventory configuration -// (identified by the inventory ID) to the bucket. You can have up to 1,000 -// inventory configurations per bucket. Amazon S3 inventory generates inventories -// of the objects in the bucket on a daily or weekly basis, and the results are -// published to a flat file. The bucket that is inventoried is called the source -// bucket, and the bucket where the inventory flat file is stored is called the -// destination bucket. The destination bucket must be in the same Amazon Web -// Services Region as the source bucket. When you configure an inventory for a -// source bucket, you specify the destination bucket where you want the inventory -// to be stored, and whether to generate the inventory daily or weekly. You can -// also configure what object metadata to include and whether to inventory all -// object versions or only current versions. For more information, see Amazon S3 -// Inventory (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html) +// This operation is not supported by directory buckets. This implementation of +// the PUT action adds an inventory configuration (identified by the inventory ID) +// to the bucket. You can have up to 1,000 inventory configurations per bucket. +// Amazon S3 inventory generates inventories of the objects in the bucket on a +// daily or weekly basis, and the results are published to a flat file. The bucket +// that is inventoried is called the source bucket, and the bucket where the +// inventory flat file is stored is called the destination bucket. The destination +// bucket must be in the same Amazon Web Services Region as the source bucket. 
When +// you configure an inventory for a source bucket, you specify the destination +// bucket where you want the inventory to be stored, and whether to generate the +// inventory daily or weekly. You can also configure what object metadata to +// include and whether to inventory all object versions or only current versions. +// For more information, see Amazon S3 Inventory (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html) // in the Amazon S3 User Guide. You must create a bucket policy on the destination // bucket to grant permissions to Amazon S3 to write objects to the bucket in the // defined location. For an example policy, see Granting Permissions for Amazon S3 @@ -87,9 +88,9 @@ type PutBucketInventoryConfigurationInput struct { // This member is required. InventoryConfiguration *types.InventoryConfiguration - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string noSmithyDocumentSerde @@ -97,7 +98,7 @@ type PutBucketInventoryConfigurationInput struct { func (in *PutBucketInventoryConfigurationInput) bindEndpointParams(p *EndpointParameters) { p.Bucket = in.Bucket - + p.UseS3ExpressControlEndpoint = ptr.Bool(true) } type PutBucketInventoryConfigurationOutput struct { @@ -162,6 +163,9 @@ func (c *Client) addOperationPutBucketInventoryConfigurationMiddlewares(stack *m if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = addOpPutBucketInventoryConfigurationValidationMiddleware(stack); err != nil { return err } diff --git a/service/s3/api_op_PutBucketLifecycleConfiguration.go b/service/s3/api_op_PutBucketLifecycleConfiguration.go index 77792dfecee..ac2b63ebb2b 100644 --- a/service/s3/api_op_PutBucketLifecycleConfiguration.go +++ b/service/s3/api_op_PutBucketLifecycleConfiguration.go @@ -11,14 +11,16 @@ import ( s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Creates a new lifecycle configuration for the bucket or replaces an existing -// lifecycle configuration. Keep in mind that this will overwrite an existing -// lifecycle configuration, so if you want to retain any configuration details, -// they must be included in the new lifecycle configuration. For information about -// lifecycle configuration, see Managing your storage lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html) +// This operation is not supported by directory buckets. Creates a new lifecycle +// configuration for the bucket or replaces an existing lifecycle configuration. +// Keep in mind that this will overwrite an existing lifecycle configuration, so if +// you want to retain any configuration details, they must be included in the new +// lifecycle configuration. For information about lifecycle configuration, see +// Managing your storage lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html) // . 
Bucket lifecycle configuration now supports specifying a lifecycle rule using // an object key name prefix, one or more object tags, or a combination of both. // Accordingly, this section describes the latest API. The previous version of the @@ -86,19 +88,19 @@ type PutBucketLifecycleConfigurationInput struct { // This member is required. Bucket *string - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not using - // the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the - // HTTP status code 400 Bad Request . For more information, see Checking object - // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Indicates the algorithm used to create the checksum for the object when you use + // the SDK. This header will not provide any additional functionality if you don't + // use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request . For more information, see + // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 // ignores any provided ChecksumAlgorithm parameter. ChecksumAlgorithm types.ChecksumAlgorithm - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string // Container for lifecycle rules. You can add as many as 1,000 rules. 
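A hedged sketch of a PutBucketLifecycleConfiguration call that also sets the ChecksumAlgorithm and ExpectedBucketOwner members documented above: setting the algorithm asks the SDK to compute and send the matching x-amz-checksum value, and the request fails with 403 Forbidden if the supplied account ID does not own the bucket. The bucket name, rule, and account ID are placeholders; the rule uses the legacy top-level Prefix selector to stay small (newer configurations generally use Filter), and the pointer-style setters assume the field shapes shown in this patch (older SDK releases use plain value types for some of these members).

package main

import (
    "context"
    "log"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/config"
    "github.com/aws/aws-sdk-go-v2/service/s3"
    "github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
    cfg, err := config.LoadDefaultConfig(context.TODO())
    if err != nil {
        log.Fatal(err)
    }
    client := s3.NewFromConfig(cfg)

    // One enabled rule: expire objects under the logs/ prefix after 30 days.
    _, err = client.PutBucketLifecycleConfiguration(context.TODO(), &s3.PutBucketLifecycleConfigurationInput{
        Bucket: aws.String("DOC-EXAMPLE-BUCKET"), // placeholder bucket
        LifecycleConfiguration: &types.BucketLifecycleConfiguration{
            Rules: []types.LifecycleRule{
                {
                    ID:         aws.String("expire-logs"),
                    Prefix:     aws.String("logs/"), // legacy prefix selector
                    Status:     types.ExpirationStatusEnabled,
                    Expiration: &types.LifecycleExpiration{Days: aws.Int32(30)},
                },
            },
        },
        ChecksumAlgorithm:   types.ChecksumAlgorithmSha256, // SDK computes the corresponding checksum
        ExpectedBucketOwner: aws.String("111122223333"),    // placeholder account ID
    })
    if err != nil {
        log.Fatal(err)
    }
}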
@@ -109,7 +111,7 @@ type PutBucketLifecycleConfigurationInput struct { func (in *PutBucketLifecycleConfigurationInput) bindEndpointParams(p *EndpointParameters) { p.Bucket = in.Bucket - + p.UseS3ExpressControlEndpoint = ptr.Bool(true) } type PutBucketLifecycleConfigurationOutput struct { @@ -174,6 +176,9 @@ func (c *Client) addOperationPutBucketLifecycleConfigurationMiddlewares(stack *m if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = addOpPutBucketLifecycleConfigurationValidationMiddleware(stack); err != nil { return err } @@ -210,6 +215,9 @@ func (c *Client) addOperationPutBucketLifecycleConfigurationMiddlewares(stack *m if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { + return err + } return nil } diff --git a/service/s3/api_op_PutBucketLogging.go b/service/s3/api_op_PutBucketLogging.go index 6db91d1a5e6..e69fa24c70e 100644 --- a/service/s3/api_op_PutBucketLogging.go +++ b/service/s3/api_op_PutBucketLogging.go @@ -11,20 +11,21 @@ import ( s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Set the logging parameters for a bucket and to specify permissions for who can -// view and modify the logging parameters. All logs are saved to buckets in the -// same Amazon Web Services Region as the source bucket. To set the logging status -// of a bucket, you must be the bucket owner. The bucket owner is automatically -// granted FULL_CONTROL to all logs. You use the Grantee request element to grant -// access to other people. The Permissions request element specifies the kind of -// access the grantee has to the logs. If the target bucket for log delivery uses -// the bucket owner enforced setting for S3 Object Ownership, you can't use the -// Grantee request element to grant access to others. Permissions can only be -// granted using policies. For more information, see Permissions for server access -// log delivery (https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general) +// This operation is not supported by directory buckets. Set the logging +// parameters for a bucket and to specify permissions for who can view and modify +// the logging parameters. All logs are saved to buckets in the same Amazon Web +// Services Region as the source bucket. To set the logging status of a bucket, you +// must be the bucket owner. The bucket owner is automatically granted FULL_CONTROL +// to all logs. You use the Grantee request element to grant access to other +// people. The Permissions request element specifies the kind of access the +// grantee has to the logs. If the target bucket for log delivery uses the bucket +// owner enforced setting for S3 Object Ownership, you can't use the Grantee +// request element to grant access to others. Permissions can only be granted using +// policies. For more information, see Permissions for server access log delivery (https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general) // in the Amazon S3 User Guide. 
Grantee Values You can specify the person (grantee) // to whom you're assigning access rights (by using request elements) in the // following ways: @@ -74,12 +75,12 @@ type PutBucketLoggingInput struct { // This member is required. BucketLoggingStatus *types.BucketLoggingStatus - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not using - // the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the - // HTTP status code 400 Bad Request . For more information, see Checking object - // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Indicates the algorithm used to create the checksum for the object when you use + // the SDK. This header will not provide any additional functionality if you don't + // use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request . For more information, see + // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 // ignores any provided ChecksumAlgorithm parameter. ChecksumAlgorithm types.ChecksumAlgorithm @@ -89,9 +90,9 @@ type PutBucketLoggingInput struct { // this field is calculated automatically. ContentMD5 *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). 
ExpectedBucketOwner *string noSmithyDocumentSerde @@ -99,7 +100,7 @@ type PutBucketLoggingInput struct { func (in *PutBucketLoggingInput) bindEndpointParams(p *EndpointParameters) { p.Bucket = in.Bucket - + p.UseS3ExpressControlEndpoint = ptr.Bool(true) } type PutBucketLoggingOutput struct { @@ -164,6 +165,9 @@ func (c *Client) addOperationPutBucketLoggingMiddlewares(stack *middleware.Stack if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = addOpPutBucketLoggingValidationMiddleware(stack); err != nil { return err } @@ -200,6 +204,9 @@ func (c *Client) addOperationPutBucketLoggingMiddlewares(stack *middleware.Stack if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { + return err + } return nil } diff --git a/service/s3/api_op_PutBucketMetricsConfiguration.go b/service/s3/api_op_PutBucketMetricsConfiguration.go index 5afe0466546..099736c11e9 100644 --- a/service/s3/api_op_PutBucketMetricsConfiguration.go +++ b/service/s3/api_op_PutBucketMetricsConfiguration.go @@ -10,18 +10,20 @@ import ( s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Sets a metrics configuration (specified by the metrics configuration ID) for -// the bucket. You can have up to 1,000 metrics configurations per bucket. If -// you're updating an existing metrics configuration, note that this is a full -// replacement of the existing metrics configuration. If you don't include the -// elements you want to keep, they are erased. To use this operation, you must have -// permissions to perform the s3:PutMetricsConfiguration action. The bucket owner -// has this permission by default. The bucket owner can grant this permission to -// others. For more information about permissions, see Permissions Related to -// Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// This operation is not supported by directory buckets. Sets a metrics +// configuration (specified by the metrics configuration ID) for the bucket. You +// can have up to 1,000 metrics configurations per bucket. If you're updating an +// existing metrics configuration, note that this is a full replacement of the +// existing metrics configuration. If you don't include the elements you want to +// keep, they are erased. To use this operation, you must have permissions to +// perform the s3:PutMetricsConfiguration action. The bucket owner has this +// permission by default. The bucket owner can grant this permission to others. For +// more information about permissions, see Permissions Related to Bucket +// Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) // and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) // . 
For information about CloudWatch request metrics for Amazon S3, see // Monitoring Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html) @@ -68,9 +70,9 @@ type PutBucketMetricsConfigurationInput struct { // This member is required. MetricsConfiguration *types.MetricsConfiguration - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string noSmithyDocumentSerde @@ -78,7 +80,7 @@ type PutBucketMetricsConfigurationInput struct { func (in *PutBucketMetricsConfigurationInput) bindEndpointParams(p *EndpointParameters) { p.Bucket = in.Bucket - + p.UseS3ExpressControlEndpoint = ptr.Bool(true) } type PutBucketMetricsConfigurationOutput struct { @@ -143,6 +145,9 @@ func (c *Client) addOperationPutBucketMetricsConfigurationMiddlewares(stack *mid if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = addOpPutBucketMetricsConfigurationValidationMiddleware(stack); err != nil { return err } diff --git a/service/s3/api_op_PutBucketNotificationConfiguration.go b/service/s3/api_op_PutBucketNotificationConfiguration.go index 6fc3112402f..7139f6ea6a8 100644 --- a/service/s3/api_op_PutBucketNotificationConfiguration.go +++ b/service/s3/api_op_PutBucketNotificationConfiguration.go @@ -10,11 +10,13 @@ import ( s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Enables notifications of specified events for a bucket. For more information -// about event notifications, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) +// This operation is not supported by directory buckets. Enables notifications of +// specified events for a bucket. For more information about event notifications, +// see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) // . Using this API, you can replace an existing notification configuration. The // configuration is an XML file that defines the event types that you want Amazon // S3 to publish and the destination where you want Amazon S3 to publish an event @@ -75,9 +77,9 @@ type PutBucketNotificationConfigurationInput struct { // This member is required. NotificationConfiguration *types.NotificationConfiguration - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string // Skips validation of Amazon SQS, Amazon SNS, and Lambda destinations. 
True or @@ -89,7 +91,7 @@ type PutBucketNotificationConfigurationInput struct { func (in *PutBucketNotificationConfigurationInput) bindEndpointParams(p *EndpointParameters) { p.Bucket = in.Bucket - + p.UseS3ExpressControlEndpoint = ptr.Bool(true) } type PutBucketNotificationConfigurationOutput struct { @@ -154,6 +156,9 @@ func (c *Client) addOperationPutBucketNotificationConfigurationMiddlewares(stack if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = addOpPutBucketNotificationConfigurationValidationMiddleware(stack); err != nil { return err } diff --git a/service/s3/api_op_PutBucketOwnershipControls.go b/service/s3/api_op_PutBucketOwnershipControls.go index 695054d5919..f89f86d65ed 100644 --- a/service/s3/api_op_PutBucketOwnershipControls.go +++ b/service/s3/api_op_PutBucketOwnershipControls.go @@ -11,12 +11,14 @@ import ( s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Creates or modifies OwnershipControls for an Amazon S3 bucket. To use this -// operation, you must have the s3:PutBucketOwnershipControls permission. For more -// information about Amazon S3 permissions, see Specifying permissions in a policy (https://docs.aws.amazon.com/AmazonS3/latest/user-guide/using-with-s3-actions.html) +// This operation is not supported by directory buckets. Creates or modifies +// OwnershipControls for an Amazon S3 bucket. To use this operation, you must have +// the s3:PutBucketOwnershipControls permission. For more information about Amazon +// S3 permissions, see Specifying permissions in a policy (https://docs.aws.amazon.com/AmazonS3/latest/user-guide/using-with-s3-actions.html) // . For information about Amazon S3 Object Ownership, see Using object ownership (https://docs.aws.amazon.com/AmazonS3/latest/user-guide/about-object-ownership.html) // . The following operations are related to PutBucketOwnershipControls : // - GetBucketOwnershipControls @@ -54,9 +56,9 @@ type PutBucketOwnershipControlsInput struct { // this field is calculated automatically. ContentMD5 *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). 
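A short sketch of the PutBucketOwnershipControls operation described above, again assuming the client from the first sketch; BucketOwnerEnforced is used here because it is the setting that disables ACLs.

func putOwnershipControlsSketch(ctx context.Context, client *s3.Client) error {
	_, err := client.PutBucketOwnershipControls(ctx, &s3.PutBucketOwnershipControlsInput{
		Bucket: aws.String("DOC-EXAMPLE-BUCKET"),
		OwnershipControls: &types.OwnershipControls{
			// BucketOwnerEnforced disables ACLs; all objects are owned by the bucket owner.
			Rules: []types.OwnershipControlsRule{
				{ObjectOwnership: types.ObjectOwnershipBucketOwnerEnforced},
			},
		},
	})
	return err
}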
ExpectedBucketOwner *string noSmithyDocumentSerde @@ -64,7 +66,7 @@ type PutBucketOwnershipControlsInput struct { func (in *PutBucketOwnershipControlsInput) bindEndpointParams(p *EndpointParameters) { p.Bucket = in.Bucket - + p.UseS3ExpressControlEndpoint = ptr.Bool(true) } type PutBucketOwnershipControlsOutput struct { @@ -129,6 +131,9 @@ func (c *Client) addOperationPutBucketOwnershipControlsMiddlewares(stack *middle if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = addOpPutBucketOwnershipControlsValidationMiddleware(stack); err != nil { return err } @@ -165,6 +170,9 @@ func (c *Client) addOperationPutBucketOwnershipControlsMiddlewares(stack *middle if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { + return err + } return nil } diff --git a/service/s3/api_op_PutBucketPolicy.go b/service/s3/api_op_PutBucketPolicy.go index b2050c85485..b3da186ac83 100644 --- a/service/s3/api_op_PutBucketPolicy.go +++ b/service/s3/api_op_PutBucketPolicy.go @@ -11,25 +11,50 @@ import ( s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Applies an Amazon S3 bucket policy to an Amazon S3 bucket. If you are using an -// identity other than the root user of the Amazon Web Services account that owns -// the bucket, the calling identity must have the PutBucketPolicy permissions on -// the specified bucket and belong to the bucket owner's account in order to use -// this operation. If you don't have PutBucketPolicy permissions, Amazon S3 -// returns a 403 Access Denied error. If you have the correct permissions, but -// you're not using an identity that belongs to the bucket owner's account, Amazon -// S3 returns a 405 Method Not Allowed error. To ensure that bucket owners don't +// Applies an Amazon S3 bucket policy to an Amazon S3 bucket. Directory buckets - +// For directory buckets, you must make requests for this API operation to the +// Regional endpoint. These endpoints support path-style requests in the format +// https://s3express-control.region_code.amazonaws.com/bucket-name . +// Virtual-hosted-style requests aren't supported. For more information, see +// Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. Permissions If you are using an identity other than +// the root user of the Amazon Web Services account that owns the bucket, the +// calling identity must both have the PutBucketPolicy permissions on the +// specified bucket and belong to the bucket owner's account in order to use this +// operation. If you don't have PutBucketPolicy permissions, Amazon S3 returns a +// 403 Access Denied error. If you have the correct permissions, but you're not +// using an identity that belongs to the bucket owner's account, Amazon S3 returns +// a 405 Method Not Allowed error. 
To ensure that bucket owners don't // inadvertently lock themselves out of their own buckets, the root principal in a // bucket owner's Amazon Web Services account can perform the GetBucketPolicy , // PutBucketPolicy , and DeleteBucketPolicy API actions, even if their bucket // policy explicitly denies the root principal's access. Bucket owner root // principals can only be blocked from performing these API actions by VPC endpoint -// policies and Amazon Web Services Organizations policies. For more information, -// see Bucket policy examples (https://docs.aws.amazon.com/AmazonS3/latest/userguide/example-bucket-policies.html) -// . The following operations are related to PutBucketPolicy : +// policies and Amazon Web Services Organizations policies. +// - General purpose bucket permissions - The s3:PutBucketPolicy permission is +// required in a policy. For more information about general purpose buckets bucket +// policies, see Using Bucket Policies and User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html) +// in the Amazon S3 User Guide. +// - Directory bucket permissions - To grant access to this API operation, you +// must have the s3express:PutBucketPolicy permission in an IAM identity-based +// policy instead of a bucket policy. Cross-account access to this API operation +// isn't supported. This operation can only be performed by the Amazon Web Services +// account that owns the resource. For more information about directory bucket +// policies and permissions, see Amazon Web Services Identity and Access +// Management (IAM) for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html) +// in the Amazon S3 User Guide. +// +// Example bucket policies General purpose buckets example bucket policies - See +// Bucket policy examples (https://docs.aws.amazon.com/AmazonS3/latest/userguide/example-bucket-policies.html) +// in the Amazon S3 User Guide. Directory bucket example bucket policies - See +// Example bucket policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html) +// in the Amazon S3 User Guide. HTTP Host header syntax Directory buckets - The +// HTTP Host header syntax is s3express-control.region.amazonaws.com . The +// following operations are related to PutBucketPolicy : // - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) // - DeleteBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html) func (c *Client) PutBucketPolicy(ctx context.Context, params *PutBucketPolicyInput, optFns ...func(*Options)) (*PutBucketPolicyOutput, error) { @@ -49,38 +74,62 @@ func (c *Client) PutBucketPolicy(ctx context.Context, params *PutBucketPolicyInp type PutBucketPolicyInput struct { - // The name of the bucket. + // The name of the bucket. Directory buckets - When you use this operation with a + // directory bucket, you must use path-style requests in the format + // https://s3express-control.region_code.amazonaws.com/bucket-name . + // Virtual-hosted-style requests aren't supported. Directory bucket names must be + // unique in the chosen Availability Zone. Bucket names must also follow the format + // bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 + // ). 
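To make the directory-bucket case above concrete, here is an illustrative sketch of attaching a bucket policy that allows only s3express:CreateSession; the principal, account ID, and the s3express resource ARN format are assumptions for illustration and are not taken from this patch. It reuses the client from the first sketch.

func putDirectoryBucketPolicySketch(ctx context.Context, client *s3.Client) error {
	// For directory buckets, the only IAM action supported in the bucket policy is
	// s3express:CreateSession (see the documentation above). The ARN below is illustrative.
	policy := `{
	  "Version": "2012-10-17",
	  "Statement": [{
	    "Effect": "Allow",
	    "Principal": {"AWS": "arn:aws:iam::111122223333:root"},
	    "Action": "s3express:CreateSession",
	    "Resource": "arn:aws:s3express:us-west-2:111122223333:bucket/DOC-EXAMPLE-BUCKET--usw2-az2--x-s3"
	  }]
	}`
	_, err := client.PutBucketPolicy(ctx, &s3.PutBucketPolicyInput{
		Bucket: aws.String("DOC-EXAMPLE-BUCKET--usw2-az2--x-s3"),
		Policy: aws.String(policy),
	})
	return err
}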
For information about bucket naming restrictions, see Directory bucket + // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide // // This member is required. Bucket *string - // The bucket policy as a JSON document. + // The bucket policy as a JSON document. For directory buckets, the only IAM + // action supported in the bucket policy is s3express:CreateSession . // // This member is required. Policy *string - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not using - // the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the - // HTTP status code 400 Bad Request . For more information, see Checking object - // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 - // ignores any provided ChecksumAlgorithm parameter. + // Indicates the algorithm used to create the checksum for the object when you use + // the SDK. This header will not provide any additional functionality if you don't + // use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum-algorithm or x-amz-trailer header sent. Otherwise, Amazon S3 + // fails the request with the HTTP status code 400 Bad Request . For the + // x-amz-checksum-algorithm header, replace algorithm with the supported + // algorithm from the following list: + // - CRC32 + // - CRC32C + // - SHA1 + // - SHA256 + // For more information, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. If the individual checksum value you provide + // through x-amz-checksum-algorithm doesn't match the checksum algorithm you set + // through x-amz-sdk-checksum-algorithm , Amazon S3 ignores any provided + // ChecksumAlgorithm parameter and uses the checksum algorithm that matches the + // provided value in x-amz-checksum-algorithm . For directory buckets, when you + // use Amazon Web Services SDKs, CRC32 is the default checksum algorithm that's + // used for performance. ChecksumAlgorithm types.ChecksumAlgorithm // Set this parameter to true to confirm that you want to remove your permissions - // to change this bucket policy in the future. + // to change this bucket policy in the future. This functionality is not supported + // for directory buckets. ConfirmRemoveSelfBucketAccess *bool // The MD5 hash of the request body. For requests made using the Amazon Web // Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is - // calculated automatically. + // calculated automatically. This functionality is not supported for directory + // buckets. ContentMD5 *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). For directory buckets, this header + // is not supported in this API operation. 
If you specify this header, the request + // fails with the HTTP status code 501 Not Implemented . ExpectedBucketOwner *string noSmithyDocumentSerde @@ -88,7 +137,7 @@ type PutBucketPolicyInput struct { func (in *PutBucketPolicyInput) bindEndpointParams(p *EndpointParameters) { p.Bucket = in.Bucket - + p.UseS3ExpressControlEndpoint = ptr.Bool(true) } type PutBucketPolicyOutput struct { @@ -153,6 +202,9 @@ func (c *Client) addOperationPutBucketPolicyMiddlewares(stack *middleware.Stack, if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = addOpPutBucketPolicyValidationMiddleware(stack); err != nil { return err } @@ -189,6 +241,9 @@ func (c *Client) addOperationPutBucketPolicyMiddlewares(stack *middleware.Stack, if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { + return err + } return nil } diff --git a/service/s3/api_op_PutBucketReplication.go b/service/s3/api_op_PutBucketReplication.go index 0690f20e26d..ddf58ad8895 100644 --- a/service/s3/api_op_PutBucketReplication.go +++ b/service/s3/api_op_PutBucketReplication.go @@ -11,11 +11,12 @@ import ( s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Creates a replication configuration or replaces an existing one. For more -// information, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) +// This operation is not supported by directory buckets. Creates a replication +// configuration or replaces an existing one. For more information, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) // in the Amazon S3 User Guide. Specify the replication configuration in the // request body. In the replication configuration, you provide the name of the // destination bucket or buckets where you want Amazon S3 to replicate objects, the @@ -83,12 +84,12 @@ type PutBucketReplicationInput struct { // This member is required. ReplicationConfiguration *types.ReplicationConfiguration - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not using - // the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the - // HTTP status code 400 Bad Request . For more information, see Checking object - // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Indicates the algorithm used to create the checksum for the object when you use + // the SDK. This header will not provide any additional functionality if you don't + // use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request . For more information, see + // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 // ignores any provided ChecksumAlgorithm parameter. 
ChecksumAlgorithm types.ChecksumAlgorithm @@ -100,9 +101,9 @@ type PutBucketReplicationInput struct { // or Amazon Web Services SDKs, this field is calculated automatically. ContentMD5 *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string // A token to allow Object Lock to be enabled for an existing bucket. @@ -113,7 +114,7 @@ type PutBucketReplicationInput struct { func (in *PutBucketReplicationInput) bindEndpointParams(p *EndpointParameters) { p.Bucket = in.Bucket - + p.UseS3ExpressControlEndpoint = ptr.Bool(true) } type PutBucketReplicationOutput struct { @@ -178,6 +179,9 @@ func (c *Client) addOperationPutBucketReplicationMiddlewares(stack *middleware.S if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = addOpPutBucketReplicationValidationMiddleware(stack); err != nil { return err } @@ -214,6 +218,9 @@ func (c *Client) addOperationPutBucketReplicationMiddlewares(stack *middleware.S if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { + return err + } return nil } diff --git a/service/s3/api_op_PutBucketRequestPayment.go b/service/s3/api_op_PutBucketRequestPayment.go index 180b2f16adc..d1dc5a76549 100644 --- a/service/s3/api_op_PutBucketRequestPayment.go +++ b/service/s3/api_op_PutBucketRequestPayment.go @@ -11,13 +11,15 @@ import ( s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Sets the request payment configuration for a bucket. By default, the bucket -// owner pays for downloads from the bucket. This configuration parameter enables -// the bucket owner (only) to specify that the person requesting the download will -// be charged for the download. For more information, see Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html) +// This operation is not supported by directory buckets. Sets the request payment +// configuration for a bucket. By default, the bucket owner pays for downloads from +// the bucket. This configuration parameter enables the bucket owner (only) to +// specify that the person requesting the download will be charged for the +// download. For more information, see Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html) // . The following operations are related to PutBucketRequestPayment : // - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) // - GetBucketRequestPayment (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketRequestPayment.html) @@ -48,12 +50,12 @@ type PutBucketRequestPaymentInput struct { // This member is required. RequestPaymentConfiguration *types.RequestPaymentConfiguration - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. 
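A brief sketch of the PutBucketRequestPayment operation described above, assuming the client from the first sketch; it switches the bucket to Requester Pays so that the person requesting the download is charged.

func putRequestPaymentSketch(ctx context.Context, client *s3.Client) error {
	_, err := client.PutBucketRequestPayment(ctx, &s3.PutBucketRequestPaymentInput{
		Bucket: aws.String("DOC-EXAMPLE-BUCKET"),
		RequestPaymentConfiguration: &types.RequestPaymentConfiguration{
			Payer: types.PayerRequester, // use types.PayerBucketOwner to switch back to the default
		},
	})
	return err
}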
This header will not provide any additional functionality if not using - // the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the - // HTTP status code 400 Bad Request . For more information, see Checking object - // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Indicates the algorithm used to create the checksum for the object when you use + // the SDK. This header will not provide any additional functionality if you don't + // use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request . For more information, see + // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 // ignores any provided ChecksumAlgorithm parameter. ChecksumAlgorithm types.ChecksumAlgorithm @@ -65,9 +67,9 @@ type PutBucketRequestPaymentInput struct { // or Amazon Web Services SDKs, this field is calculated automatically. ContentMD5 *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string noSmithyDocumentSerde @@ -75,7 +77,7 @@ type PutBucketRequestPaymentInput struct { func (in *PutBucketRequestPaymentInput) bindEndpointParams(p *EndpointParameters) { p.Bucket = in.Bucket - + p.UseS3ExpressControlEndpoint = ptr.Bool(true) } type PutBucketRequestPaymentOutput struct { @@ -140,6 +142,9 @@ func (c *Client) addOperationPutBucketRequestPaymentMiddlewares(stack *middlewar if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = addOpPutBucketRequestPaymentValidationMiddleware(stack); err != nil { return err } @@ -176,6 +181,9 @@ func (c *Client) addOperationPutBucketRequestPaymentMiddlewares(stack *middlewar if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { + return err + } return nil } diff --git a/service/s3/api_op_PutBucketTagging.go b/service/s3/api_op_PutBucketTagging.go index 6264943a1fe..725facc1138 100644 --- a/service/s3/api_op_PutBucketTagging.go +++ b/service/s3/api_op_PutBucketTagging.go @@ -11,17 +11,19 @@ import ( s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Sets the tags for a bucket. Use tags to organize your Amazon Web Services bill -// to reflect your own cost structure. To do this, sign up to get your Amazon Web -// Services account bill with tag key values included. Then, to see the cost of -// combined resources, organize your billing information according to resources -// with the same tag key values. 
For example, you can tag several resources with a -// specific application name, and then organize your billing information to see the -// total cost of that application across several services. For more information, -// see Cost Allocation and Tagging (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html) +// This operation is not supported by directory buckets. Sets the tags for a +// bucket. Use tags to organize your Amazon Web Services bill to reflect your own +// cost structure. To do this, sign up to get your Amazon Web Services account bill +// with tag key values included. Then, to see the cost of combined resources, +// organize your billing information according to resources with the same tag key +// values. For example, you can tag several resources with a specific application +// name, and then organize your billing information to see the total cost of that +// application across several services. For more information, see Cost Allocation +// and Tagging (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html) // and Using Cost Allocation in Amazon S3 Bucket Tags (https://docs.aws.amazon.com/AmazonS3/latest/userguide/CostAllocTagging.html) // . When this operation sets the tags for a bucket, it will overwrite any current // tags the bucket already has. You cannot use this operation to add tags to an @@ -73,12 +75,12 @@ type PutBucketTaggingInput struct { // This member is required. Tagging *types.Tagging - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not using - // the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the - // HTTP status code 400 Bad Request . For more information, see Checking object - // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Indicates the algorithm used to create the checksum for the object when you use + // the SDK. This header will not provide any additional functionality if you don't + // use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request . For more information, see + // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 // ignores any provided ChecksumAlgorithm parameter. ChecksumAlgorithm types.ChecksumAlgorithm @@ -90,9 +92,9 @@ type PutBucketTaggingInput struct { // or Amazon Web Services SDKs, this field is calculated automatically. ContentMD5 *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). 
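A sketch of the PutBucketTagging operation described above, reusing the client from the first sketch; the tag keys and values are placeholders. Because the operation overwrites the bucket's existing tag set, the full desired set is sent in one call.

func putBucketTaggingSketch(ctx context.Context, client *s3.Client) error {
	_, err := client.PutBucketTagging(ctx, &s3.PutBucketTaggingInput{
		Bucket: aws.String("DOC-EXAMPLE-BUCKET"),
		Tagging: &types.Tagging{
			// This replaces any tags currently on the bucket.
			TagSet: []types.Tag{
				{Key: aws.String("CostCenter"), Value: aws.String("12345")},
				{Key: aws.String("Application"), Value: aws.String("payroll")},
			},
		},
	})
	return err
}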
ExpectedBucketOwner *string noSmithyDocumentSerde @@ -100,7 +102,7 @@ type PutBucketTaggingInput struct { func (in *PutBucketTaggingInput) bindEndpointParams(p *EndpointParameters) { p.Bucket = in.Bucket - + p.UseS3ExpressControlEndpoint = ptr.Bool(true) } type PutBucketTaggingOutput struct { @@ -165,6 +167,9 @@ func (c *Client) addOperationPutBucketTaggingMiddlewares(stack *middleware.Stack if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = addOpPutBucketTaggingValidationMiddleware(stack); err != nil { return err } @@ -201,6 +206,9 @@ func (c *Client) addOperationPutBucketTaggingMiddlewares(stack *middleware.Stack if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { + return err + } return nil } diff --git a/service/s3/api_op_PutBucketVersioning.go b/service/s3/api_op_PutBucketVersioning.go index 28ee7cef56b..c2b751ab77a 100644 --- a/service/s3/api_op_PutBucketVersioning.go +++ b/service/s3/api_op_PutBucketVersioning.go @@ -11,15 +11,17 @@ import ( s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Sets the versioning state of an existing bucket. You can set the versioning -// state with one of the following values: Enabled—Enables versioning for the -// objects in the bucket. All objects added to the bucket receive a unique version -// ID. Suspended—Disables versioning for the objects in the bucket. All objects -// added to the bucket receive the version ID null. If the versioning state has -// never been set on a bucket, it has no versioning state; a GetBucketVersioning (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html) +// This operation is not supported by directory buckets. Sets the versioning state +// of an existing bucket. You can set the versioning state with one of the +// following values: Enabled—Enables versioning for the objects in the bucket. All +// objects added to the bucket receive a unique version ID. Suspended—Disables +// versioning for the objects in the bucket. All objects added to the bucket +// receive the version ID null. If the versioning state has never been set on a +// bucket, it has no versioning state; a GetBucketVersioning (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html) // request does not return a versioning state value. In order to enable MFA Delete, // you must be the bucket owner. If you are the bucket owner and want to enable MFA // Delete in the bucket versioning configuration, you must include the x-amz-mfa @@ -62,12 +64,12 @@ type PutBucketVersioningInput struct { // This member is required. VersioningConfiguration *types.VersioningConfiguration - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not using - // the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the - // HTTP status code 400 Bad Request . 
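A sketch of PutBucketVersioning with the Enabled state described above, assuming the client from the first sketch; enabling MFA Delete would additionally require the MFA field and an MFADelete setting in the configuration, which is omitted here.

func enableVersioningSketch(ctx context.Context, client *s3.Client) error {
	_, err := client.PutBucketVersioning(ctx, &s3.PutBucketVersioningInput{
		Bucket: aws.String("DOC-EXAMPLE-BUCKET"),
		VersioningConfiguration: &types.VersioningConfiguration{
			// Enabled gives every new object a unique version ID;
			// Suspended makes new objects use the null version ID.
			Status: types.BucketVersioningStatusEnabled,
		},
	})
	return err
}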
For more information, see Checking object - // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Indicates the algorithm used to create the checksum for the object when you use + // the SDK. This header will not provide any additional functionality if you don't + // use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request . For more information, see + // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 // ignores any provided ChecksumAlgorithm parameter. ChecksumAlgorithm types.ChecksumAlgorithm @@ -79,9 +81,9 @@ type PutBucketVersioningInput struct { // or Amazon Web Services SDKs, this field is calculated automatically. ContentMD5 *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string // The concatenation of the authentication device's serial number, a space, and @@ -93,7 +95,7 @@ type PutBucketVersioningInput struct { func (in *PutBucketVersioningInput) bindEndpointParams(p *EndpointParameters) { p.Bucket = in.Bucket - + p.UseS3ExpressControlEndpoint = ptr.Bool(true) } type PutBucketVersioningOutput struct { @@ -158,6 +160,9 @@ func (c *Client) addOperationPutBucketVersioningMiddlewares(stack *middleware.St if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = addOpPutBucketVersioningValidationMiddleware(stack); err != nil { return err } @@ -194,6 +199,9 @@ func (c *Client) addOperationPutBucketVersioningMiddlewares(stack *middleware.St if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { + return err + } return nil } diff --git a/service/s3/api_op_PutBucketWebsite.go b/service/s3/api_op_PutBucketWebsite.go index 8a8e6dbb094..27555453080 100644 --- a/service/s3/api_op_PutBucketWebsite.go +++ b/service/s3/api_op_PutBucketWebsite.go @@ -11,14 +11,15 @@ import ( s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Sets the configuration of the website that is specified in the website -// subresource. To configure a bucket as a website, you can add this subresource on -// the bucket with website configuration information such as the file name of the -// index document and any redirect rules. For more information, see Hosting -// Websites on Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html) +// This operation is not supported by directory buckets. Sets the configuration of +// the website that is specified in the website subresource. 
To configure a bucket +// as a website, you can add this subresource on the bucket with website +// configuration information such as the file name of the index document and any +// redirect rules. For more information, see Hosting Websites on Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html) // . This PUT action requires the S3:PutBucketWebsite permission. By default, only // the bucket owner can configure the website attached to a bucket; however, bucket // owners can allow other users to set the website configuration by writing a @@ -84,12 +85,12 @@ type PutBucketWebsiteInput struct { // This member is required. WebsiteConfiguration *types.WebsiteConfiguration - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not using - // the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the - // HTTP status code 400 Bad Request . For more information, see Checking object - // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Indicates the algorithm used to create the checksum for the object when you use + // the SDK. This header will not provide any additional functionality if you don't + // use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request . For more information, see + // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 // ignores any provided ChecksumAlgorithm parameter. ChecksumAlgorithm types.ChecksumAlgorithm @@ -101,9 +102,9 @@ type PutBucketWebsiteInput struct { // or Amazon Web Services SDKs, this field is calculated automatically. ContentMD5 *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). 
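A sketch of the PutBucketWebsite operation described above, assuming the client from the first sketch; the index and error documents are placeholders and routing rules are omitted.

func putWebsiteSketch(ctx context.Context, client *s3.Client) error {
	_, err := client.PutBucketWebsite(ctx, &s3.PutBucketWebsiteInput{
		Bucket: aws.String("DOC-EXAMPLE-BUCKET"),
		WebsiteConfiguration: &types.WebsiteConfiguration{
			IndexDocument: &types.IndexDocument{Suffix: aws.String("index.html")},
			ErrorDocument: &types.ErrorDocument{Key: aws.String("error.html")},
		},
	})
	return err
}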
ExpectedBucketOwner *string noSmithyDocumentSerde @@ -111,7 +112,7 @@ type PutBucketWebsiteInput struct { func (in *PutBucketWebsiteInput) bindEndpointParams(p *EndpointParameters) { p.Bucket = in.Bucket - + p.UseS3ExpressControlEndpoint = ptr.Bool(true) } type PutBucketWebsiteOutput struct { @@ -176,6 +177,9 @@ func (c *Client) addOperationPutBucketWebsiteMiddlewares(stack *middleware.Stack if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = addOpPutBucketWebsiteValidationMiddleware(stack); err != nil { return err } @@ -212,6 +216,9 @@ func (c *Client) addOperationPutBucketWebsiteMiddlewares(stack *middleware.Stack if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { + return err + } return nil } diff --git a/service/s3/api_op_PutObject.go b/service/s3/api_op_PutObject.go index f4207d49663..1bade82e9a3 100644 --- a/service/s3/api_op_PutObject.go +++ b/service/s3/api_op_PutObject.go @@ -17,71 +17,75 @@ import ( "time" ) -// Adds an object to a bucket. You must have WRITE permissions on a bucket to add -// an object to it. Amazon S3 never adds partial objects; if you receive a success -// response, Amazon S3 added the entire object to the bucket. You cannot use -// PutObject to only update a single piece of metadata for an existing object. You -// must put the entire object with updated metadata if you want to update some -// values. Amazon S3 is a distributed system. If it receives multiple write -// requests for the same object simultaneously, it overwrites all but the last -// object written. To prevent objects from being deleted or overwritten, you can -// use Amazon S3 Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock.html) -// . To ensure that data is not corrupted traversing the network, use the -// Content-MD5 header. When you use this header, Amazon S3 checks the object -// against the provided MD5 value and, if they do not match, returns an error. -// Additionally, you can calculate the MD5 while putting an object to Amazon S3 and -// compare the returned ETag to the calculated MD5 value. -// - To successfully complete the PutObject request, you must have the -// s3:PutObject in your IAM permissions. -// - To successfully change the objects acl of your PutObject request, you must -// have the s3:PutObjectAcl in your IAM permissions. -// - To successfully set the tag-set with your PutObject request, you must have -// the s3:PutObjectTagging in your IAM permissions. -// - The Content-MD5 header is required for any request to upload an object with -// a retention period configured using Amazon S3 Object Lock. For more information -// about Amazon S3 Object Lock, see Amazon S3 Object Lock Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html) +// Adds an object to a bucket. +// - Amazon S3 never adds partial objects; if you receive a success response, +// Amazon S3 added the entire object to the bucket. You cannot use PutObject to +// only update a single piece of metadata for an existing object. You must put the +// entire object with updated metadata if you want to update some values. +// - If your bucket uses the bucket owner enforced setting for Object Ownership, +// ACLs are disabled and no longer affect permissions. 
All objects written to the +// bucket by any account will be owned by the bucket owner. +// - Directory buckets - For directory buckets, you must make requests for this +// API operation to the Zonal endpoint. These endpoints support +// virtual-hosted-style requests in the format +// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . +// Path-style requests are not supported. For more information, see Regional and +// Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) // in the Amazon S3 User Guide. // -// You have four mutually exclusive options to protect data using server-side -// encryption in Amazon S3, depending on how you choose to manage the encryption -// keys. Specifically, the encryption key options are Amazon S3 managed keys -// (SSE-S3), Amazon Web Services KMS keys (SSE-KMS or DSSE-KMS), and -// customer-provided keys (SSE-C). Amazon S3 encrypts data with server-side -// encryption by using Amazon S3 managed keys (SSE-S3) by default. You can -// optionally tell Amazon S3 to encrypt data at rest by using server-side -// encryption with other key options. For more information, see Using Server-Side -// Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html) -// . When adding a new object, you can use headers to grant ACL-based permissions -// to individual Amazon Web Services accounts or to predefined groups defined by -// Amazon S3. These permissions are then added to the ACL on the object. By -// default, all objects are private. Only the owner has full access control. For -// more information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) -// and Managing ACLs Using the REST API (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html) -// . If the bucket that you're uploading objects to uses the bucket owner enforced -// setting for S3 Object Ownership, ACLs are disabled and no longer affect -// permissions. Buckets that use this setting only accept PUT requests that don't -// specify an ACL or PUT requests that specify bucket owner full control ACLs, such -// as the bucket-owner-full-control canned ACL or an equivalent form of this ACL -// expressed in the XML format. PUT requests that contain other ACLs (for example, -// custom grants to certain Amazon Web Services accounts) fail and return a 400 -// error with the error code AccessControlListNotSupported . For more information, -// see Controlling ownership of objects and disabling ACLs (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) -// in the Amazon S3 User Guide. If your bucket uses the bucket owner enforced -// setting for Object Ownership, all objects written to the bucket by any account -// will be owned by the bucket owner. By default, Amazon S3 uses the STANDARD -// Storage Class to store newly created objects. The STANDARD storage class -// provides high durability and high availability. Depending on performance needs, -// you can specify a different Storage Class. Amazon S3 on Outposts only uses the -// OUTPOSTS Storage Class. For more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) -// in the Amazon S3 User Guide. If you enable versioning for a bucket, Amazon S3 -// automatically generates a unique version ID for the object being stored. Amazon -// S3 returns this ID in the response. 
When you enable versioning for a bucket, if -// Amazon S3 receives multiple write requests for the same object simultaneously, -// it stores all of the objects. For more information about versioning, see Adding -// Objects to Versioning-Enabled Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/AddingObjectstoVersioningEnabledBuckets.html) -// . For information about returning the versioning state of a bucket, see -// GetBucketVersioning (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html) -// . For more information about related Amazon S3 APIs, see the following: +// Amazon S3 is a distributed system. If it receives multiple write requests for +// the same object simultaneously, it overwrites all but the last object written. +// However, Amazon S3 provides features that can modify this behavior: +// - S3 Object Lock - To prevent objects from being deleted or overwritten, you +// can use Amazon S3 Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock.html) +// in the Amazon S3 User Guide. This functionality is not supported for directory +// buckets. +// - S3 Versioning - When you enable versioning for a bucket, if Amazon S3 +// receives multiple write requests for the same object simultaneously, it stores +// all versions of the objects. For each write request that is made to the same +// object, Amazon S3 automatically generates a unique version ID of that object +// being stored in Amazon S3. You can retrieve, replace, or delete any version of +// the object. For more information about versioning, see Adding Objects to +// Versioning-Enabled Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/AddingObjectstoVersioningEnabledBuckets.html) +// in the Amazon S3 User Guide. For information about returning the versioning +// state of a bucket, see GetBucketVersioning (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html) +// . This functionality is not supported for directory buckets. +// +// Permissions +// - General purpose bucket permissions - The following permissions are required +// in your policies when your PutObject request includes specific headers. +// - s3:PutObject - To successfully complete the PutObject request, you must +// always have the s3:PutObject permission on a bucket to add an object to it. +// - s3:PutObjectAcl - To successfully change the objects ACL of your PutObject +// request, you must have the s3:PutObjectAcl . +// - s3:PutObjectTagging - To successfully set the tag-set with your PutObject +// request, you must have the s3:PutObjectTagging . +// - Directory bucket permissions - To grant access to this API operation on a +// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// API operation for session-based authorization. Specifically, you grant the +// s3express:CreateSession permission to the directory bucket in a bucket policy +// or an IAM identity-based policy. Then, you make the CreateSession API call on +// the bucket to obtain a session token. With the session token in your request +// header, you can make API requests to this operation. After the session token +// expires, you make another CreateSession API call to generate a new session +// token for use. Amazon Web Services CLI or SDKs create session and refresh the +// session token automatically to avoid service interruptions when a session +// expires. 
For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// . +// +// Data integrity with Content-MD5 +// - General purpose bucket - To ensure that data is not corrupted traversing +// the network, use the Content-MD5 header. When you use this header, Amazon S3 +// checks the object against the provided MD5 value and, if they do not match, +// Amazon S3 returns an error. Alternatively, when the object's ETag is its MD5 +// digest, you can calculate the MD5 while putting the object to Amazon S3 and +// compare the returned ETag to the calculated MD5 value. +// - Directory bucket - This functionality is not supported for directory +// buckets. +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket_name.s3express-az_id.region.amazonaws.com . For more information about +// related Amazon S3 APIs, see the following: // - CopyObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html) // - DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) func (c *Client) PutObject(ctx context.Context, params *PutObjectInput, optFns ...func(*Options)) (*PutObjectOutput, error) { @@ -101,16 +105,26 @@ func (c *Client) PutObject(ctx context.Context, params *PutObjectInput, optFns . type PutObjectInput struct { - // The bucket name to which the PUT action was initiated. When using this action - // with an access point, you must direct requests to the access point hostname. The - // access point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. When you use this action with Amazon S3 on - // Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on - // Outposts hostname takes the form + // The bucket name to which the PUT action was initiated. Directory buckets - When + // you use this operation with a directory bucket, you must use + // virtual-hosted-style requests in the format + // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not + // supported. Directory bucket names must be unique in the chosen Availability + // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for + // example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 ). For information about bucket + // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. Access points - When you use this action with an + // access point, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When using the access point ARN, + // you must direct requests to the access point hostname. The access point hostname + // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. 
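A sketch of the directory-bucket PutObject flow described above, assuming the client and imports from the first sketch plus the standard strings package; the bucket name follows the documented bucket_base_name--az_id--x-s3 format and is a placeholder. Since the documentation above notes that the SDKs create and refresh the CreateSession token automatically, no explicit session handling appears here.

func putObjectDirectoryBucketSketch(ctx context.Context, client *s3.Client) error {
	_, err := client.PutObject(ctx, &s3.PutObjectInput{
		// Zonal endpoint, virtual-hosted-style request; placeholder directory bucket name.
		Bucket: aws.String("DOC-EXAMPLE-BUCKET--usw2-az2--x-s3"),
		Key:    aws.String("notes/hello.txt"),
		Body:   strings.NewReader("hello from S3 Express One Zone"),
	})
	return err
}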
For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. Access points and Object Lambda access points are + // not supported by directory buckets. S3 on Outposts - When you use this action + // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts + // hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you // use this action with S3 on Outposts through the Amazon Web Services SDKs, you // provide the Outposts access point ARN in place of the bucket name. For more @@ -126,7 +140,25 @@ type PutObjectInput struct { Key *string // The canned ACL to apply to the object. For more information, see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL) - // . This action is not supported by Amazon S3 on Outposts. + // in the Amazon S3 User Guide. When adding a new object, you can use headers to + // grant ACL-based permissions to individual Amazon Web Services accounts or to + // predefined groups defined by Amazon S3. These permissions are then added to the + // ACL on the object. By default, all objects are private. Only the owner has full + // access control. For more information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) + // and Managing ACLs Using the REST API (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html) + // in the Amazon S3 User Guide. If the bucket that you're uploading objects to uses + // the bucket owner enforced setting for S3 Object Ownership, ACLs are disabled and + // no longer affect permissions. Buckets that use this setting only accept PUT + // requests that don't specify an ACL or PUT requests that specify bucket owner + // full control ACLs, such as the bucket-owner-full-control canned ACL or an + // equivalent form of this ACL expressed in the XML format. PUT requests that + // contain other ACLs (for example, custom grants to certain Amazon Web Services + // accounts) fail and return a 400 error with the error code + // AccessControlListNotSupported . For more information, see Controlling + // ownership of objects and disabling ACLs (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) + // in the Amazon S3 User Guide. + // - This functionality is not supported for directory buckets. + // - This functionality is not supported for Amazon S3 on Outposts. ACL types.ObjectCannedACL // Object data. @@ -136,7 +168,8 @@ type PutObjectInput struct { // with server-side encryption using Key Management Service (KMS) keys (SSE-KMS). // Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object // encryption with SSE-KMS. Specifying this header with a PUT action doesn’t affect - // bucket-level settings for S3 Bucket Key. + // bucket-level settings for S3 Bucket Key. This functionality is not supported for + // directory buckets. BucketKeyEnabled *bool // Can be used to specify caching behavior along the request/reply chain. For more @@ -144,14 +177,25 @@ type PutObjectInput struct { // . CacheControl *string - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not using - // the SDK. 
When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the - // HTTP status code 400 Bad Request . For more information, see Checking object - // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 - // ignores any provided ChecksumAlgorithm parameter. + // Indicates the algorithm used to create the checksum for the object when you use + // the SDK. This header will not provide any additional functionality if you don't + // use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum-algorithm or x-amz-trailer header sent. Otherwise, Amazon S3 + // fails the request with the HTTP status code 400 Bad Request . For the + // x-amz-checksum-algorithm header, replace algorithm with the supported + // algorithm from the following list: + // - CRC32 + // - CRC32C + // - SHA1 + // - SHA256 + // For more information, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. If the individual checksum value you provide + // through x-amz-checksum-algorithm doesn't match the checksum algorithm you set + // through x-amz-sdk-checksum-algorithm , Amazon S3 ignores any provided + // ChecksumAlgorithm parameter and uses the checksum algorithm that matches the + // provided value in x-amz-checksum-algorithm . For directory buckets, when you + // use Amazon Web Services SDKs, CRC32 is the default checksum algorithm that's + // used for performance. ChecksumAlgorithm types.ChecksumAlgorithm // This header can be used as a data integrity check to verify that the data @@ -209,7 +253,11 @@ type PutObjectInput struct { // optional, we recommend using the Content-MD5 mechanism as an end-to-end // integrity check. For more information about REST request authentication, see // REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html) - // . + // . The Content-MD5 header is required for any request to upload an object with a + // retention period configured using Amazon S3 Object Lock. For more information + // about Amazon S3 Object Lock, see Amazon S3 Object Lock Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html) + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. ContentMD5 *string // A standard MIME type describing the format of the contents. For more @@ -217,9 +265,9 @@ type PutObjectInput struct { // . ContentType *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string // The date and time at which the object is no longer cacheable. For more @@ -227,20 +275,24 @@ type PutObjectInput struct { // . Expires *time.Time - // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. This - // action is not supported by Amazon S3 on Outposts. + // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. 
+ // - This functionality is not supported for directory buckets. + // - This functionality is not supported for Amazon S3 on Outposts. GrantFullControl *string - // Allows grantee to read the object data and its metadata. This action is not - // supported by Amazon S3 on Outposts. + // Allows grantee to read the object data and its metadata. + // - This functionality is not supported for directory buckets. + // - This functionality is not supported for Amazon S3 on Outposts. GrantRead *string - // Allows grantee to read the object ACL. This action is not supported by Amazon - // S3 on Outposts. + // Allows grantee to read the object ACL. + // - This functionality is not supported for directory buckets. + // - This functionality is not supported for Amazon S3 on Outposts. GrantReadACP *string - // Allows grantee to write the ACL for the applicable object. This action is not - // supported by Amazon S3 on Outposts. + // Allows grantee to write the ACL for the applicable object. + // - This functionality is not supported for directory buckets. + // - This functionality is not supported for Amazon S3 on Outposts. GrantWriteACP *string // A map of metadata to store with the object in S3. @@ -248,39 +300,45 @@ type PutObjectInput struct { // Specifies whether a legal hold will be applied to this object. For more // information about S3 Object Lock, see Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) - // . + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus - // The Object Lock mode that you want to apply to this object. + // The Object Lock mode that you want to apply to this object. This functionality + // is not supported for directory buckets. ObjectLockMode types.ObjectLockMode // The date and time when you want this object's Object Lock to expire. Must be - // formatted as a timestamp parameter. + // formatted as a timestamp parameter. This functionality is not supported for + // directory buckets. ObjectLockRetainUntilDate *time.Time // Confirms that the requester knows that they will be charged for the request. // Bucket owners need not specify this parameter in their requests. If either the - // source or destination Amazon S3 bucket has Requester Pays enabled, the requester - // will pay for corresponding charges to copy the object. For information about + // source or destination S3 bucket has Requester Pays enabled, the requester will + // pay for corresponding charges to copy the object. For information about // downloading objects from Requester Pays buckets, see Downloading Objects in // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. RequestPayer types.RequestPayer - // Specifies the algorithm to use to when encrypting the object (for example, - // AES256). + // Specifies the algorithm to use when encrypting the object (for example, AES256 + // ). This functionality is not supported for directory buckets. SSECustomerAlgorithm *string // Specifies the customer-provided encryption key for Amazon S3 to use in // encrypting data. This value is used to store the object and then it is // discarded; Amazon S3 does not store the encryption key. 
The key must be // appropriate for use with the algorithm specified in the - // x-amz-server-side-encryption-customer-algorithm header. + // x-amz-server-side-encryption-customer-algorithm header. This functionality is + // not supported for directory buckets. SSECustomerKey *string // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure that the - // encryption key was transmitted without error. + // encryption key was transmitted without error. This functionality is not + // supported for directory buckets. SSECustomerKeyMD5 *string // Specifies the Amazon Web Services KMS Encryption Context to use for object @@ -288,7 +346,8 @@ type PutObjectInput struct { // JSON with the encryption context key-value pairs. This value is stored as object // metadata and automatically gets passed on to Amazon Web Services KMS for future // GetObject or CopyObject operations on this object. This value must be - // explicitly added during CopyObject operations. + // explicitly added during CopyObject operations. This functionality is not + // supported for directory buckets. SSEKMSEncryptionContext *string // If x-amz-server-side-encryption has a valid value of aws:kms or aws:kms:dsse , @@ -299,37 +358,53 @@ type PutObjectInput struct { // x-amz-server-side-encryption-aws-kms-key-id , Amazon S3 uses the Amazon Web // Services managed key ( aws/s3 ) to protect the data. If the KMS key does not // exist in the same account that's issuing the command, you must use the full ARN - // and not just the ID. + // and not just the ID. This functionality is not supported for directory buckets. SSEKMSKeyId *string - // The server-side encryption algorithm used when storing this object in Amazon S3 - // (for example, AES256 , aws:kms , aws:kms:dsse ). + // The server-side encryption algorithm that was used when you store this object + // in Amazon S3 (for example, AES256 , aws:kms , aws:kms:dsse ). General purpose + // buckets - You have four mutually exclusive options to protect data using + // server-side encryption in Amazon S3, depending on how you choose to manage the + // encryption keys. Specifically, the encryption key options are Amazon S3 managed + // keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS or DSSE-KMS), and + // customer-provided keys (SSE-C). Amazon S3 encrypts data with server-side + // encryption by using Amazon S3 managed keys (SSE-S3) by default. You can + // optionally tell Amazon S3 to encrypt data at rest by using server-side + // encryption with other key options. For more information, see Using Server-Side + // Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html) + // in the Amazon S3 User Guide. Directory buckets - For directory buckets, only the + // server-side encryption with Amazon S3 managed keys (SSE-S3) ( AES256 ) value is + // supported. ServerSideEncryption types.ServerSideEncryption // By default, Amazon S3 uses the STANDARD Storage Class to store newly created // objects. The STANDARD storage class provides high durability and high // availability. Depending on performance needs, you can specify a different - // Storage Class. Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For - // more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) + // Storage Class. 
For more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) // in the Amazon S3 User Guide. + // - For directory buckets, only the S3 Express One Zone storage class is + // supported to store newly created objects. + // - Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. StorageClass types.StorageClass // The tag-set for the object. The tag-set must be encoded as URL Query - // parameters. (For example, "Key1=Value1") + // parameters. (For example, "Key1=Value1") This functionality is not supported for + // directory buckets. Tagging *string // If the bucket is configured as a website, redirects requests for this object to // another object in the same bucket or to an external URL. Amazon S3 stores the // value of this header in the object metadata. For information about object // metadata, see Object Key and Metadata (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html) - // . In the following example, the request header sets the redirect to an object - // (anotherPage.html) in the same bucket: x-amz-website-redirect-location: - // /anotherPage.html In the following example, the request header sets the object - // redirect to another website: x-amz-website-redirect-location: - // http://www.example.com/ For more information about website hosting in Amazon S3, - // see Hosting Websites on Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html) + // in the Amazon S3 User Guide. In the following example, the request header sets + // the redirect to an object (anotherPage.html) in the same bucket: + // x-amz-website-redirect-location: /anotherPage.html In the following example, the + // request header sets the object redirect to another website: + // x-amz-website-redirect-location: http://www.example.com/ For more information + // about website hosting in Amazon S3, see Hosting Websites on Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html) // and How to Configure Website Page Redirects (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html) - // . + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. WebsiteRedirectLocation *string noSmithyDocumentSerde @@ -344,78 +419,107 @@ func (in *PutObjectInput) bindEndpointParams(p *EndpointParameters) { type PutObjectOutput struct { // Indicates whether the uploaded object uses an S3 Bucket Key for server-side - // encryption with Key Management Service (KMS) keys (SSE-KMS). + // encryption with Key Management Service (KMS) keys (SSE-KMS). This functionality + // is not supported for directory buckets. BucketKeyEnabled *bool // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this may not - // be a checksum value of the object. For more information about how checksums are - // calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use an API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. Instead, it's a calculation based on the + // checksum values of each individual part. 
For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumCRC32 *string // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this may not - // be a checksum value of the object. For more information about how checksums are - // calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use an API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. Instead, it's a calculation based on the + // checksum values of each individual part. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumCRC32C *string // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this may not - // be a checksum value of the object. For more information about how checksums are - // calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use the API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. Instead, it's a calculation based on the + // checksum values of each individual part. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumSHA1 *string // The base64-encoded, 256-bit SHA-256 digest of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this may not - // be a checksum value of the object. For more information about how checksums are - // calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use an API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. Instead, it's a calculation based on the + // checksum values of each individual part. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumSHA256 *string - // Entity tag for the uploaded object. + // Entity tag for the uploaded object. 
General purpose buckets - To ensure that + // data is not corrupted traversing the network, for objects where the ETag is the + // MD5 digest of the object, you can calculate the MD5 while putting an object to + // Amazon S3 and compare the returned ETag to the calculated MD5 value. Directory + // buckets - The ETag for the object in a directory bucket isn't the MD5 digest of + // the object. ETag *string // If the expiration is configured for the object (see // PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html) - // ), the response includes this header. It includes the expiry-date and rule-id - // key-value pairs that provide information about object expiration. The value of - // the rule-id is URL-encoded. + // ) in the Amazon S3 User Guide, the response includes this header. It includes + // the expiry-date and rule-id key-value pairs that provide information about + // object expiration. The value of the rule-id is URL-encoded. This functionality + // is not supported for directory buckets. Expiration *string // If present, indicates that the requester was successfully charged for the - // request. + // request. This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // If server-side encryption with a customer-provided encryption key was - // requested, the response will include this header confirming the encryption - // algorithm used. + // requested, the response will include this header to confirm the encryption + // algorithm that's used. This functionality is not supported for directory + // buckets. SSECustomerAlgorithm *string // If server-side encryption with a customer-provided encryption key was - // requested, the response will include this header to provide round-trip message - // integrity verification of the customer-provided encryption key. + // requested, the response will include this header to provide the round-trip + // message integrity verification of the customer-provided encryption key. This + // functionality is not supported for directory buckets. SSECustomerKeyMD5 *string - // If present, specifies the Amazon Web Services KMS Encryption Context to use for + // If present, indicates the Amazon Web Services KMS Encryption Context to use for // object encryption. The value of this header is a base64-encoded UTF-8 string // holding JSON with the encryption context key-value pairs. This value is stored // as object metadata and automatically gets passed on to Amazon Web Services KMS - // for future GetObject or CopyObject operations on this object. + // for future GetObject or CopyObject operations on this object. This + // functionality is not supported for directory buckets. SSEKMSEncryptionContext *string // If x-amz-server-side-encryption has a valid value of aws:kms or aws:kms:dsse , - // this header specifies the ID of the Key Management Service (KMS) symmetric - // encryption customer managed key that was used for the object. + // this header indicates the ID of the Key Management Service (KMS) symmetric + // encryption customer managed key that was used for the object. This functionality + // is not supported for directory buckets. SSEKMSKeyId *string - // The server-side encryption algorithm used when storing this object in Amazon S3 - // (for example, AES256 , aws:kms , aws:kms:dsse ). + // The server-side encryption algorithm used when you store this object in Amazon + // S3 (for example, AES256 , aws:kms , aws:kms:dsse ). 
For directory buckets, only + // server-side encryption with Amazon S3 managed keys (SSE-S3) ( AES256 ) is + // supported. ServerSideEncryption types.ServerSideEncryption - // Version of the object. + // Version ID of the object. If you enable versioning for a bucket, Amazon S3 + // automatically generates a unique version ID for the object being stored. Amazon + // S3 returns this ID in the response. When you enable versioning for a bucket, if + // Amazon S3 receives multiple write requests for the same object simultaneously, + // it stores all of the objects. For more information about versioning, see Adding + // Objects to Versioning-Enabled Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/AddingObjectstoVersioningEnabledBuckets.html) + // in the Amazon S3 User Guide. For information about returning the versioning + // state of a bucket, see GetBucketVersioning (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html) + // . This functionality is not supported for directory buckets. VersionId *string // Metadata pertaining to the operation's result. @@ -479,6 +583,9 @@ func (c *Client) addOperationPutObjectMiddlewares(stack *middleware.Stack, optio if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = addOpPutObjectValidationMiddleware(stack); err != nil { return err } diff --git a/service/s3/api_op_PutObjectAcl.go b/service/s3/api_op_PutObjectAcl.go index 8aebe7e1418..5716f550f6b 100644 --- a/service/s3/api_op_PutObjectAcl.go +++ b/service/s3/api_op_PutObjectAcl.go @@ -14,15 +14,15 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Uses the acl subresource to set the access control list (ACL) permissions for a -// new or existing object in an S3 bucket. You must have WRITE_ACP permission to -// set the ACL of an object. For more information, see What permissions can I -// grant? (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#permissions) -// in the Amazon S3 User Guide. This action is not supported by Amazon S3 on -// Outposts. Depending on your application needs, you can choose to set the ACL on -// an object using either the request body or the headers. For example, if you have -// an existing application that updates a bucket ACL using the request body, you -// can continue to use that approach. For more information, see Access Control +// This operation is not supported by directory buckets. Uses the acl subresource +// to set the access control list (ACL) permissions for a new or existing object in +// an S3 bucket. You must have the WRITE_ACP permission to set the ACL of an +// object. For more information, see What permissions can I grant? (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#permissions) +// in the Amazon S3 User Guide. This functionality is not supported for Amazon S3 +// on Outposts. Depending on your application needs, you can choose to set the ACL +// on an object using either the request body or the headers. For example, if you +// have an existing application that updates a bucket ACL using the request body, +// you can continue to use that approach. For more information, see Access Control // List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) // in the Amazon S3 User Guide. 
If your bucket uses the bucket owner enforced // setting for S3 Object Ownership, ACLs are disabled and no longer affect @@ -113,27 +113,17 @@ func (c *Client) PutObjectAcl(ctx context.Context, params *PutObjectAclInput, op type PutObjectAclInput struct { // The bucket name that contains the object to which you want to attach the ACL. - // When using this action with an access point, you must direct requests to the + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the access + // point ARN. When using the access point ARN, you must direct requests to the // access point hostname. The access point hostname takes the form // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this // action with an access point through the Amazon Web Services SDKs, you provide // the access point ARN in place of the bucket name. For more information about // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. - // - // This member is required. - Bucket *string - - // Key for which the PUT action was initiated. When using this action with an - // access point, you must direct requests to the access point hostname. The access - // point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. When you use this action with Amazon S3 on - // Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on - // Outposts hostname takes the form + // in the Amazon S3 User Guide. S3 on Outposts - When you use this action with + // Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. + // The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you // use this action with S3 on Outposts through the Amazon Web Services SDKs, you // provide the Outposts access point ARN in place of the bucket name. For more @@ -141,6 +131,11 @@ type PutObjectAclInput struct { // in the Amazon S3 User Guide. // // This member is required. + Bucket *string + + // Key for which the PUT action was initiated. + // + // This member is required. Key *string // The canned ACL to apply to the object. For more information, see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL) @@ -150,12 +145,12 @@ type PutObjectAclInput struct { // Contains the elements that set the ACL permissions for an object per grantee. AccessControlPolicy *types.AccessControlPolicy - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not using - // the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the - // HTTP status code 400 Bad Request . 
For more information, see Checking object - // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Indicates the algorithm used to create the checksum for the object when you use + // the SDK. This header will not provide any additional functionality if you don't + // use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request . For more information, see + // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 // ignores any provided ChecksumAlgorithm parameter. ChecksumAlgorithm types.ChecksumAlgorithm @@ -167,21 +162,21 @@ type PutObjectAclInput struct { // Amazon Web Services SDKs, this field is calculated automatically. ContentMD5 *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string // Allows grantee the read, write, read ACP, and write ACP permissions on the - // bucket. This action is not supported by Amazon S3 on Outposts. + // bucket. This functionality is not supported for Amazon S3 on Outposts. GrantFullControl *string - // Allows grantee to list the objects in the bucket. This action is not supported - // by Amazon S3 on Outposts. + // Allows grantee to list the objects in the bucket. This functionality is not + // supported for Amazon S3 on Outposts. GrantRead *string - // Allows grantee to read the bucket ACL. This action is not supported by Amazon - // S3 on Outposts. + // Allows grantee to read the bucket ACL. This functionality is not supported for + // Amazon S3 on Outposts. GrantReadACP *string // Allows grantee to create new objects in the bucket. For the bucket and object @@ -189,20 +184,22 @@ type PutObjectAclInput struct { // objects. GrantWrite *string - // Allows grantee to write the ACL for the applicable bucket. This action is not - // supported by Amazon S3 on Outposts. + // Allows grantee to write the ACL for the applicable bucket. This functionality + // is not supported for Amazon S3 on Outposts. GrantWriteACP *string // Confirms that the requester knows that they will be charged for the request. // Bucket owners need not specify this parameter in their requests. If either the - // source or destination Amazon S3 bucket has Requester Pays enabled, the requester - // will pay for corresponding charges to copy the object. For information about + // source or destination S3 bucket has Requester Pays enabled, the requester will + // pay for corresponding charges to copy the object. For information about // downloading objects from Requester Pays buckets, see Downloading Objects in // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. RequestPayer types.RequestPayer - // VersionId used to reference a specific version of the object. 
+ // Version ID used to reference a specific version of the object. This + // functionality is not supported for directory buckets. VersionId *string noSmithyDocumentSerde @@ -217,7 +214,7 @@ func (in *PutObjectAclInput) bindEndpointParams(p *EndpointParameters) { type PutObjectAclOutput struct { // If present, indicates that the requester was successfully charged for the - // request. + // request. This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // Metadata pertaining to the operation's result. @@ -281,6 +278,9 @@ func (c *Client) addOperationPutObjectAclMiddlewares(stack *middleware.Stack, op if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = addOpPutObjectAclValidationMiddleware(stack); err != nil { return err } @@ -317,6 +317,9 @@ func (c *Client) addOperationPutObjectAclMiddlewares(stack *middleware.Stack, op if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { + return err + } return nil } diff --git a/service/s3/api_op_PutObjectLegalHold.go b/service/s3/api_op_PutObjectLegalHold.go index 4adbe8cf783..3fa38af3eba 100644 --- a/service/s3/api_op_PutObjectLegalHold.go +++ b/service/s3/api_op_PutObjectLegalHold.go @@ -14,9 +14,9 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Applies a legal hold configuration to the specified object. For more -// information, see Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) -// . This action is not supported by Amazon S3 on Outposts. +// This operation is not supported by directory buckets. Applies a legal hold +// configuration to the specified object. For more information, see Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) +// . This functionality is not supported for Amazon S3 on Outposts. func (c *Client) PutObjectLegalHold(ctx context.Context, params *PutObjectLegalHoldInput, optFns ...func(*Options)) (*PutObjectLegalHoldOutput, error) { if params == nil { params = &PutObjectLegalHoldInput{} @@ -35,7 +35,9 @@ func (c *Client) PutObjectLegalHold(ctx context.Context, params *PutObjectLegalH type PutObjectLegalHoldInput struct { // The bucket name containing the object that you want to place a legal hold on. - // When using this action with an access point, you must direct requests to the + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the access + // point ARN. When using the access point ARN, you must direct requests to the // access point hostname. The access point hostname takes the form // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this // action with an access point through the Amazon Web Services SDKs, you provide @@ -51,12 +53,12 @@ type PutObjectLegalHoldInput struct { // This member is required. Key *string - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not using - // the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the - // HTTP status code 400 Bad Request . 
For more information, see Checking object - // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Indicates the algorithm used to create the checksum for the object when you use + // the SDK. This header will not provide any additional functionality if you don't + // use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request . For more information, see + // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 // ignores any provided ChecksumAlgorithm parameter. ChecksumAlgorithm types.ChecksumAlgorithm @@ -66,9 +68,9 @@ type PutObjectLegalHoldInput struct { // calculated automatically. ContentMD5 *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string // Container element for the legal hold configuration you want to apply to the @@ -77,11 +79,12 @@ type PutObjectLegalHoldInput struct { // Confirms that the requester knows that they will be charged for the request. // Bucket owners need not specify this parameter in their requests. If either the - // source or destination Amazon S3 bucket has Requester Pays enabled, the requester - // will pay for corresponding charges to copy the object. For information about + // source or destination S3 bucket has Requester Pays enabled, the requester will + // pay for corresponding charges to copy the object. For information about // downloading objects from Requester Pays buckets, see Downloading Objects in // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. RequestPayer types.RequestPayer // The version ID of the object that you want to place a legal hold on. @@ -98,7 +101,7 @@ func (in *PutObjectLegalHoldInput) bindEndpointParams(p *EndpointParameters) { type PutObjectLegalHoldOutput struct { // If present, indicates that the requester was successfully charged for the - // request. + // request. This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // Metadata pertaining to the operation's result. 
@@ -162,6 +165,9 @@ func (c *Client) addOperationPutObjectLegalHoldMiddlewares(stack *middleware.Sta if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = addOpPutObjectLegalHoldValidationMiddleware(stack); err != nil { return err } @@ -198,6 +204,9 @@ func (c *Client) addOperationPutObjectLegalHoldMiddlewares(stack *middleware.Sta if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { + return err + } return nil } diff --git a/service/s3/api_op_PutObjectLockConfiguration.go b/service/s3/api_op_PutObjectLockConfiguration.go index 2bc5717a1b8..49425c8f9f7 100644 --- a/service/s3/api_op_PutObjectLockConfiguration.go +++ b/service/s3/api_op_PutObjectLockConfiguration.go @@ -14,9 +14,10 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Places an Object Lock configuration on the specified bucket. The rule specified -// in the Object Lock configuration will be applied by default to every new object -// placed in the specified bucket. For more information, see Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) +// This operation is not supported by directory buckets. Places an Object Lock +// configuration on the specified bucket. The rule specified in the Object Lock +// configuration will be applied by default to every new object placed in the +// specified bucket. For more information, see Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) // . // - The DefaultRetention settings require both a mode and a period. // - The DefaultRetention period can be either Days or Years but you must select @@ -46,12 +47,12 @@ type PutObjectLockConfigurationInput struct { // This member is required. Bucket *string - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not using - // the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the - // HTTP status code 400 Bad Request . For more information, see Checking object - // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Indicates the algorithm used to create the checksum for the object when you use + // the SDK. This header will not provide any additional functionality if you don't + // use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request . For more information, see + // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 // ignores any provided ChecksumAlgorithm parameter. ChecksumAlgorithm types.ChecksumAlgorithm @@ -61,9 +62,9 @@ type PutObjectLockConfigurationInput struct { // calculated automatically. ContentMD5 *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. 
If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string // The Object Lock configuration that you want to apply to the specified bucket. @@ -71,11 +72,12 @@ type PutObjectLockConfigurationInput struct { // Confirms that the requester knows that they will be charged for the request. // Bucket owners need not specify this parameter in their requests. If either the - // source or destination Amazon S3 bucket has Requester Pays enabled, the requester - // will pay for corresponding charges to copy the object. For information about + // source or destination S3 bucket has Requester Pays enabled, the requester will + // pay for corresponding charges to copy the object. For information about // downloading objects from Requester Pays buckets, see Downloading Objects in // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. RequestPayer types.RequestPayer // A token to allow Object Lock to be enabled for an existing bucket. @@ -92,7 +94,7 @@ func (in *PutObjectLockConfigurationInput) bindEndpointParams(p *EndpointParamet type PutObjectLockConfigurationOutput struct { // If present, indicates that the requester was successfully charged for the - // request. + // request. This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // Metadata pertaining to the operation's result. @@ -156,6 +158,9 @@ func (c *Client) addOperationPutObjectLockConfigurationMiddlewares(stack *middle if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = addOpPutObjectLockConfigurationValidationMiddleware(stack); err != nil { return err } @@ -192,6 +197,9 @@ func (c *Client) addOperationPutObjectLockConfigurationMiddlewares(stack *middle if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { + return err + } return nil } diff --git a/service/s3/api_op_PutObjectRetention.go b/service/s3/api_op_PutObjectRetention.go index d2fd1920023..5dfb98f3d3a 100644 --- a/service/s3/api_op_PutObjectRetention.go +++ b/service/s3/api_op_PutObjectRetention.go @@ -14,12 +14,12 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Places an Object Retention configuration on an object. For more information, -// see Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) +// This operation is not supported by directory buckets. Places an Object +// Retention configuration on an object. For more information, see Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) // . Users or accounts require the s3:PutObjectRetention permission in order to // place an Object Retention configuration on objects. Bypassing a Governance // Retention configuration requires the s3:BypassGovernanceRetention permission. -// This action is not supported by Amazon S3 on Outposts. +// This functionality is not supported for Amazon S3 on Outposts. 
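A similar sketch for PutObjectRetention follows. The GOVERNANCE mode, 30-day window, package and function names, and bucket/key parameters are illustrative assumptions; the client is assumed to be built with s3.NewFromConfig as in the earlier sketch, and the bucket must have Object Lock enabled.

package s3sketch

import (
	"context"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// lockFor30Days places a GOVERNANCE-mode retention period on one object.
// Callers need the s3:PutObjectRetention permission; bypassing GOVERNANCE
// retention later additionally requires s3:BypassGovernanceRetention.
func lockFor30Days(ctx context.Context, client *s3.Client, bucket, key string) error {
	_, err := client.PutObjectRetention(ctx, &s3.PutObjectRetentionInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
		Retention: &types.ObjectLockRetention{
			Mode:            types.ObjectLockRetentionModeGovernance,
			RetainUntilDate: aws.Time(time.Now().AddDate(0, 0, 30)),
		},
	})
	return err
}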
func (c *Client) PutObjectRetention(ctx context.Context, params *PutObjectRetentionInput, optFns ...func(*Options)) (*PutObjectRetentionOutput, error) { if params == nil { params = &PutObjectRetentionInput{} @@ -38,8 +38,10 @@ func (c *Client) PutObjectRetention(ctx context.Context, params *PutObjectRetent type PutObjectRetentionInput struct { // The bucket name that contains the object you want to apply this Object - // Retention configuration to. When using this action with an access point, you - // must direct requests to the access point hostname. The access point hostname + // Retention configuration to. Access points - When you use this action with an + // access point, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When using the access point ARN, + // you must direct requests to the access point hostname. The access point hostname // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. // When using this action with an access point through the Amazon Web Services // SDKs, you provide the access point ARN in place of the bucket name. For more @@ -58,12 +60,12 @@ type PutObjectRetentionInput struct { // Indicates whether this action should bypass Governance-mode restrictions. BypassGovernanceRetention *bool - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not using - // the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the - // HTTP status code 400 Bad Request . For more information, see Checking object - // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Indicates the algorithm used to create the checksum for the object when you use + // the SDK. This header will not provide any additional functionality if you don't + // use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request . For more information, see + // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 // ignores any provided ChecksumAlgorithm parameter. ChecksumAlgorithm types.ChecksumAlgorithm @@ -73,18 +75,19 @@ type PutObjectRetentionInput struct { // calculated automatically. ContentMD5 *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string // Confirms that the requester knows that they will be charged for the request. // Bucket owners need not specify this parameter in their requests. If either the - // source or destination Amazon S3 bucket has Requester Pays enabled, the requester - // will pay for corresponding charges to copy the object. For information about + // source or destination S3 bucket has Requester Pays enabled, the requester will + // pay for corresponding charges to copy the object. 
For information about // downloading objects from Requester Pays buckets, see Downloading Objects in // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. RequestPayer types.RequestPayer // The container element for the Object Retention configuration. @@ -105,7 +108,7 @@ func (in *PutObjectRetentionInput) bindEndpointParams(p *EndpointParameters) { type PutObjectRetentionOutput struct { // If present, indicates that the requester was successfully charged for the - // request. + // request. This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // Metadata pertaining to the operation's result. @@ -169,6 +172,9 @@ func (c *Client) addOperationPutObjectRetentionMiddlewares(stack *middleware.Sta if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = addOpPutObjectRetentionValidationMiddleware(stack); err != nil { return err } @@ -205,6 +211,9 @@ func (c *Client) addOperationPutObjectRetentionMiddlewares(stack *middleware.Sta if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { + return err + } return nil } diff --git a/service/s3/api_op_PutObjectTagging.go b/service/s3/api_op_PutObjectTagging.go index 37e998a3330..8b42d43d1cb 100644 --- a/service/s3/api_op_PutObjectTagging.go +++ b/service/s3/api_op_PutObjectTagging.go @@ -14,8 +14,9 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Sets the supplied tag-set to an object that already exists in a bucket. A tag -// is a key-value pair. For more information, see Object Tagging (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-tagging.html) +// This operation is not supported by directory buckets. Sets the supplied tag-set +// to an object that already exists in a bucket. A tag is a key-value pair. For +// more information, see Object Tagging (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-tagging.html) // . You can associate tags with an object by sending a PUT request against the // tagging subresource that is associated with the object. You can retrieve tags by // sending a GET request. For more information, see GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html) @@ -59,16 +60,18 @@ func (c *Client) PutObjectTagging(ctx context.Context, params *PutObjectTaggingI type PutObjectTaggingInput struct { - // The bucket name containing the object. When using this action with an access - // point, you must direct requests to the access point hostname. The access point + // The bucket name containing the object. Access points - When you use this action + // with an access point, you must provide the alias of the access point in place of + // the bucket name or specify the access point ARN. When using the access point + // ARN, you must direct requests to the access point hostname. The access point // hostname takes the form // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this // action with an access point through the Amazon Web Services SDKs, you provide // the access point ARN in place of the bucket name. 
For more information about // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. When you use this action with Amazon S3 on - // Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on - // Outposts hostname takes the form + // in the Amazon S3 User Guide. S3 on Outposts - When you use this action with + // Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. + // The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you // use this action with S3 on Outposts through the Amazon Web Services SDKs, you // provide the Outposts access point ARN in place of the bucket name. For more @@ -88,12 +91,12 @@ type PutObjectTaggingInput struct { // This member is required. Tagging *types.Tagging - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not using - // the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the - // HTTP status code 400 Bad Request . For more information, see Checking object - // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Indicates the algorithm used to create the checksum for the object when you use + // the SDK. This header will not provide any additional functionality if you don't + // use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request . For more information, see + // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 // ignores any provided ChecksumAlgorithm parameter. ChecksumAlgorithm types.ChecksumAlgorithm @@ -103,18 +106,19 @@ type PutObjectTaggingInput struct { // calculated automatically. ContentMD5 *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string // Confirms that the requester knows that they will be charged for the request. // Bucket owners need not specify this parameter in their requests. If either the - // source or destination Amazon S3 bucket has Requester Pays enabled, the requester - // will pay for corresponding charges to copy the object. For information about + // source or destination S3 bucket has Requester Pays enabled, the requester will + // pay for corresponding charges to copy the object. For information about // downloading objects from Requester Pays buckets, see Downloading Objects in // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. 
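For the PutObjectTagging fields documented above, a short usage sketch is shown below. The package name, function name, bucket, key, and tag values are placeholders; note that the supplied tag-set replaces any tags already on the object.

package s3sketch

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// tagObject replaces the object's tag-set with a single project tag and
// returns the version ID that the tag-set was applied to, if any.
func tagObject(ctx context.Context, client *s3.Client, bucket, key string) (string, error) {
	out, err := client.PutObjectTagging(ctx, &s3.PutObjectTaggingInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
		Tagging: &types.Tagging{
			TagSet: []types.Tag{
				{Key: aws.String("project"), Value: aws.String("analytics")},
			},
		},
	})
	if err != nil {
		return "", err
	}
	return aws.ToString(out.VersionId), nil
}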
RequestPayer types.RequestPayer // The versionId of the object that the tag-set will be added to. @@ -194,6 +198,9 @@ func (c *Client) addOperationPutObjectTaggingMiddlewares(stack *middleware.Stack if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = addOpPutObjectTaggingValidationMiddleware(stack); err != nil { return err } @@ -230,6 +237,9 @@ func (c *Client) addOperationPutObjectTaggingMiddlewares(stack *middleware.Stack if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { + return err + } return nil } diff --git a/service/s3/api_op_PutPublicAccessBlock.go b/service/s3/api_op_PutPublicAccessBlock.go index 206e9d07228..ab0b5405e88 100644 --- a/service/s3/api_op_PutPublicAccessBlock.go +++ b/service/s3/api_op_PutPublicAccessBlock.go @@ -11,13 +11,14 @@ import ( s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Creates or modifies the PublicAccessBlock configuration for an Amazon S3 -// bucket. To use this operation, you must have the s3:PutBucketPublicAccessBlock -// permission. For more information about Amazon S3 permissions, see Specifying -// Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) +// This operation is not supported by directory buckets. Creates or modifies the +// PublicAccessBlock configuration for an Amazon S3 bucket. To use this operation, +// you must have the s3:PutBucketPublicAccessBlock permission. For more +// information about Amazon S3 permissions, see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) // . When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket or // an object, it checks the PublicAccessBlock configuration for both the bucket // (or the bucket that contains the object) and the bucket owner's account. If the @@ -62,12 +63,12 @@ type PutPublicAccessBlockInput struct { // This member is required. PublicAccessBlockConfiguration *types.PublicAccessBlockConfiguration - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not using - // the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the - // HTTP status code 400 Bad Request . For more information, see Checking object - // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Indicates the algorithm used to create the checksum for the object when you use + // the SDK. This header will not provide any additional functionality if you don't + // use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request . For more information, see + // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. 
If you provide an individual checksum, Amazon S3 // ignores any provided ChecksumAlgorithm parameter. ChecksumAlgorithm types.ChecksumAlgorithm @@ -77,9 +78,9 @@ type PutPublicAccessBlockInput struct { // SDKs, this field is calculated automatically. ContentMD5 *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string noSmithyDocumentSerde @@ -87,7 +88,7 @@ type PutPublicAccessBlockInput struct { func (in *PutPublicAccessBlockInput) bindEndpointParams(p *EndpointParameters) { p.Bucket = in.Bucket - + p.UseS3ExpressControlEndpoint = ptr.Bool(true) } type PutPublicAccessBlockOutput struct { @@ -152,6 +153,9 @@ func (c *Client) addOperationPutPublicAccessBlockMiddlewares(stack *middleware.S if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = addOpPutPublicAccessBlockValidationMiddleware(stack); err != nil { return err } @@ -188,6 +192,9 @@ func (c *Client) addOperationPutPublicAccessBlockMiddlewares(stack *middleware.S if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { + return err + } return nil } diff --git a/service/s3/api_op_RestoreObject.go b/service/s3/api_op_RestoreObject.go index 19b5e6f1e41..a84b5326ce0 100644 --- a/service/s3/api_op_RestoreObject.go +++ b/service/s3/api_op_RestoreObject.go @@ -14,9 +14,9 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Restores an archived copy of an object back into Amazon S3 This action is not -// supported by Amazon S3 on Outposts. This action performs the following types of -// requests: +// This operation is not supported by directory buckets. Restores an archived copy +// of an object back into Amazon S3 This functionality is not supported for Amazon +// S3 on Outposts. This action performs the following types of requests: // - select - Perform a select query on an archived object // - restore an archive - Restore an archived object // @@ -179,16 +179,18 @@ func (c *Client) RestoreObject(ctx context.Context, params *RestoreObjectInput, type RestoreObjectInput struct { - // The bucket name containing the object to restore. When using this action with - // an access point, you must direct requests to the access point hostname. The + // The bucket name containing the object to restore. Access points - When you use + // this action with an access point, you must provide the alias of the access point + // in place of the bucket name or specify the access point ARN. When using the + // access point ARN, you must direct requests to the access point hostname. The // access point hostname takes the form // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this // action with an access point through the Amazon Web Services SDKs, you provide // the access point ARN in place of the bucket name. For more information about // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. 
When you use this action with Amazon S3 on - // Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on - // Outposts hostname takes the form + // in the Amazon S3 User Guide. S3 on Outposts - When you use this action with + // Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. + // The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you // use this action with S3 on Outposts through the Amazon Web Services SDKs, you // provide the Outposts access point ARN in place of the bucket name. For more @@ -203,28 +205,29 @@ type RestoreObjectInput struct { // This member is required. Key *string - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not using - // the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the - // HTTP status code 400 Bad Request . For more information, see Checking object - // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Indicates the algorithm used to create the checksum for the object when you use + // the SDK. This header will not provide any additional functionality if you don't + // use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request . For more information, see + // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 // ignores any provided ChecksumAlgorithm parameter. ChecksumAlgorithm types.ChecksumAlgorithm - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string // Confirms that the requester knows that they will be charged for the request. // Bucket owners need not specify this parameter in their requests. If either the - // source or destination Amazon S3 bucket has Requester Pays enabled, the requester - // will pay for corresponding charges to copy the object. For information about + // source or destination S3 bucket has Requester Pays enabled, the requester will + // pay for corresponding charges to copy the object. For information about // downloading objects from Requester Pays buckets, see Downloading Objects in // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. RequestPayer types.RequestPayer // Container for restore job parameters. @@ -244,7 +247,7 @@ func (in *RestoreObjectInput) bindEndpointParams(p *EndpointParameters) { type RestoreObjectOutput struct { // If present, indicates that the requester was successfully charged for the - // request. + // request. This functionality is not supported for directory buckets. 
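A hedged sketch of initiating a restore for an archived object follows. The seven-day duration, Standard retrieval tier, and object names are assumptions, and Days is written as *int32 as in recent SDK releases (some versions declare it as a plain int32).

package s3sketch

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// restoreArchivedObject asks S3 to make an archived object readable for
// seven days using the Standard retrieval tier. The restore completes
// asynchronously; HeadObject can be polled to see when it finishes.
func restoreArchivedObject(ctx context.Context, client *s3.Client, bucket, key string) error {
	_, err := client.RestoreObject(ctx, &s3.RestoreObjectInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
		RestoreRequest: &types.RestoreRequest{
			Days: aws.Int32(7), // plain int32 in some older SDK releases
			GlacierJobParameters: &types.GlacierJobParameters{
				Tier: types.TierStandard,
			},
		},
	})
	return err
}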
RequestCharged types.RequestCharged // Indicates the path in the provided S3 output location where Select results will @@ -312,6 +315,9 @@ func (c *Client) addOperationRestoreObjectMiddlewares(stack *middleware.Stack, o if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = addOpRestoreObjectValidationMiddleware(stack); err != nil { return err } diff --git a/service/s3/api_op_SelectObjectContent.go b/service/s3/api_op_SelectObjectContent.go index 24cb77910b0..888ec9c250b 100644 --- a/service/s3/api_op_SelectObjectContent.go +++ b/service/s3/api_op_SelectObjectContent.go @@ -15,18 +15,20 @@ import ( "sync" ) -// This action filters the contents of an Amazon S3 object based on a simple -// structured query language (SQL) statement. In the request, along with the SQL -// expression, you must also specify a data serialization format (JSON, CSV, or -// Apache Parquet) of the object. Amazon S3 uses this format to parse object data -// into records, and returns only records that match the specified SQL expression. -// You must also specify the data serialization format for the response. This -// action is not supported by Amazon S3 on Outposts. For more information about -// Amazon S3 Select, see Selecting Content from Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/selecting-content-from-objects.html) +// This operation is not supported by directory buckets. This action filters the +// contents of an Amazon S3 object based on a simple structured query language +// (SQL) statement. In the request, along with the SQL expression, you must also +// specify a data serialization format (JSON, CSV, or Apache Parquet) of the +// object. Amazon S3 uses this format to parse object data into records, and +// returns only records that match the specified SQL expression. You must also +// specify the data serialization format for the response. This functionality is +// not supported for Amazon S3 on Outposts. For more information about Amazon S3 +// Select, see Selecting Content from Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/selecting-content-from-objects.html) // and SELECT Command (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-glacier-select-sql-reference-select.html) -// in the Amazon S3 User Guide. Permissions You must have s3:GetObject permission -// for this operation. Amazon S3 Select does not support anonymous access. For more -// information about permissions, see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) +// in the Amazon S3 User Guide. Permissions You must have the s3:GetObject +// permission for this operation. Amazon S3 Select does not support anonymous +// access. For more information about permissions, see Specifying Permissions in a +// Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) // in the Amazon S3 User Guide. Object Data Formats You can use Amazon S3 Select to // query objects that have the following format properties: // - CSV, JSON, and Parquet - Objects must be in CSV, JSON, or Parquet format. @@ -129,9 +131,9 @@ type SelectObjectContentInput struct { // This member is required. OutputSerialization *types.OutputSerialization - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). 
+ // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string // Specifies if periodic request progress information should be enabled. @@ -240,6 +242,9 @@ func (c *Client) addOperationSelectObjectContentMiddlewares(stack *middleware.St if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = addOpSelectObjectContentValidationMiddleware(stack); err != nil { return err } diff --git a/service/s3/api_op_UploadPart.go b/service/s3/api_op_UploadPart.go index 5077378ba92..53507fbabdf 100644 --- a/service/s3/api_op_UploadPart.go +++ b/service/s3/api_op_UploadPart.go @@ -16,20 +16,49 @@ import ( "io" ) -// Uploads a part in a multipart upload. In this operation, you provide part data -// in your request. However, you have an option to specify your existing Amazon S3 -// object as a data source for the part you are uploading. To upload a part from an -// existing object, you use the UploadPartCopy (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html) +// Uploads a part in a multipart upload. In this operation, you provide new data +// as a part of an object in your request. However, you have an option to specify +// your existing Amazon S3 object as a data source for the part you are uploading. +// To upload a part from an existing object, you use the UploadPartCopy (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html) // operation. You must initiate a multipart upload (see CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) // ) before you can upload any part. In response to your initiate request, Amazon -// S3 returns an upload ID, a unique identifier, that you must include in your +// S3 returns an upload ID, a unique identifier that you must include in your // upload part request. Part numbers can be any number from 1 to 10,000, inclusive. // A part number uniquely identifies a part and also defines its position within // the object being created. If you upload a new part using the same part number // that was used with a previous part, the previously uploaded part is overwritten. // For information about maximum and minimum part sizes and other multipart upload // specifications, see Multipart upload limits (https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html) -// in the Amazon S3 User Guide. To ensure that data is not corrupted when +// in the Amazon S3 User Guide. After you initiate multipart upload and upload one +// or more parts, you must either complete or abort multipart upload in order to +// stop getting charged for storage of the uploaded parts. Only after you either +// complete or abort multipart upload, Amazon S3 frees up the parts storage and +// stops charging you for the parts storage. For more information on multipart +// uploads, go to Multipart Upload Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html) +// in the Amazon S3 User Guide . Directory buckets - For directory buckets, you +// must make requests for this API operation to the Zonal endpoint. These endpoints +// support virtual-hosted-style requests in the format +// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style +// requests are not supported. 
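As a concrete point of reference for the SelectObjectContent behavior documented above, the sketch below issues a select query against a CSV object and drains the resulting event stream. It is only a minimal illustration: the bucket, key, and SQL expression are placeholders, and the CSV-in/JSON-out serialization settings are just one of the combinations the operation accepts.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Query a CSV object and return matching records as JSON. Both the input
	// and output data serialization formats are required, as noted above.
	resp, err := client.SelectObjectContent(ctx, &s3.SelectObjectContentInput{
		Bucket:         aws.String("amzn-s3-demo-bucket"), // placeholder
		Key:            aws.String("data/records.csv"),    // placeholder
		Expression:     aws.String("SELECT s.name FROM S3Object s WHERE s.city = 'Seattle'"),
		ExpressionType: types.ExpressionTypeSql,
		InputSerialization: &types.InputSerialization{
			CSV: &types.CSVInput{FileHeaderInfo: types.FileHeaderInfoUse},
		},
		OutputSerialization: &types.OutputSerialization{
			JSON: &types.JSONOutput{},
		},
	})
	if err != nil {
		log.Fatal(err)
	}

	// Results arrive as an event stream; Records events carry the payload bytes.
	stream := resp.GetStream()
	defer stream.Close()
	for event := range stream.Events() {
		if records, ok := event.(*types.SelectObjectContentEventStreamMemberRecords); ok {
			fmt.Print(string(records.Value.Payload))
		}
	}
	if err := stream.Err(); err != nil {
		log.Fatal(err)
	}
}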
For more information, see Regional and Zonal +// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. Permissions +// - General purpose bucket permissions - For information on the permissions +// required to use the multipart upload API, see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) +// in the Amazon S3 User Guide. +// - Directory bucket permissions - To grant access to this API operation on a +// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// API operation for session-based authorization. Specifically, you grant the +// s3express:CreateSession permission to the directory bucket in a bucket policy +// or an IAM identity-based policy. Then, you make the CreateSession API call on +// the bucket to obtain a session token. With the session token in your request +// header, you can make API requests to this operation. After the session token +// expires, you make another CreateSession API call to generate a new session +// token for use. Amazon Web Services CLI or SDKs create session and refresh the +// session token automatically to avoid service interruptions when a session +// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// . +// +// Data integrity General purpose bucket - To ensure that data is not corrupted // traversing the network, specify the Content-MD5 header in the upload part // request. Amazon S3 checks the part data against the provided MD5 value. If they // do not match, Amazon S3 returns an error. If the upload request is signed with @@ -37,51 +66,44 @@ import ( // header as a checksum instead of Content-MD5 . For more information see // Authenticating Requests: Using the Authorization Header (Amazon Web Services // Signature Version 4) (https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-auth-using-authorization-header.html) -// . Note: After you initiate multipart upload and upload one or more parts, you -// must either complete or abort multipart upload in order to stop getting charged -// for storage of the uploaded parts. Only after you either complete or abort -// multipart upload, Amazon S3 frees up the parts storage and stops charging you -// for the parts storage. For more information on multipart uploads, go to -// Multipart Upload Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html) -// in the Amazon S3 User Guide . For information on the permissions required to use -// the multipart upload API, go to Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) -// in the Amazon S3 User Guide. Server-side encryption is for data encryption at -// rest. Amazon S3 encrypts your data as it writes it to disks in its data centers -// and decrypts it when you access it. You have three mutually exclusive options to -// protect data using server-side encryption in Amazon S3, depending on how you -// choose to manage the encryption keys. Specifically, the encryption key options -// are Amazon S3 managed keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS), and -// Customer-Provided Keys (SSE-C). Amazon S3 encrypts data with server-side -// encryption using Amazon S3 managed keys (SSE-S3) by default. 
You can optionally -// tell Amazon S3 to encrypt data at rest using server-side encryption with other -// key options. The option you use depends on whether you want to use KMS keys -// (SSE-KMS) or provide your own encryption key (SSE-C). If you choose to provide -// your own encryption key, the request headers you provide in the request must -// match the headers you used in the request to initiate the upload by using -// CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) -// . For more information, go to Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html) -// in the Amazon S3 User Guide. Server-side encryption is supported by the S3 -// Multipart Upload actions. Unless you are using a customer-provided encryption -// key (SSE-C), you don't need to specify the encryption parameters in each -// UploadPart request. Instead, you only need to specify the server-side encryption -// parameters in the initial Initiate Multipart request. For more information, see -// CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) -// . If you requested server-side encryption using a customer-provided encryption -// key (SSE-C) in your initiate multipart upload request, you must provide -// identical encryption information in each part upload using the following -// headers. +// . Directory buckets - MD5 is not supported by directory buckets. You can use +// checksum algorithms to check object integrity. Encryption +// - General purpose bucket - Server-side encryption is for data encryption at +// rest. Amazon S3 encrypts your data as it writes it to disks in its data centers +// and decrypts it when you access it. You have mutually exclusive options to +// protect data using server-side encryption in Amazon S3, depending on how you +// choose to manage the encryption keys. Specifically, the encryption key options +// are Amazon S3 managed keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS), and +// Customer-Provided Keys (SSE-C). Amazon S3 encrypts data with server-side +// encryption using Amazon S3 managed keys (SSE-S3) by default. You can optionally +// tell Amazon S3 to encrypt data at rest using server-side encryption with other +// key options. The option you use depends on whether you want to use KMS keys +// (SSE-KMS) or provide your own encryption key (SSE-C). Server-side encryption is +// supported by the S3 Multipart Upload operations. Unless you are using a +// customer-provided encryption key (SSE-C), you don't need to specify the +// encryption parameters in each UploadPart request. Instead, you only need to +// specify the server-side encryption parameters in the initial Initiate Multipart +// request. For more information, see CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) +// . If you request server-side encryption using a customer-provided encryption key +// (SSE-C) in your initiate multipart upload request, you must provide identical +// encryption information in each part upload using the following request headers. // - x-amz-server-side-encryption-customer-algorithm // - x-amz-server-side-encryption-customer-key // - x-amz-server-side-encryption-customer-key-MD5 +// - Directory bucket - For directory buckets, only server-side encryption with +// Amazon S3 managed keys (SSE-S3) ( AES256 ) is supported. 
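A short sketch may help tie the SSE-C notes above together: when a multipart upload was initiated with a customer-provided key, every UploadPart call has to repeat the same algorithm, base64-encoded key, and key MD5. The bucket, key, upload ID, and key bytes below are placeholders, and depending on the module version PartNumber may be a plain int32 rather than a pointer.

package main

import (
	"bytes"
	"context"
	"crypto/md5"
	"encoding/base64"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// The same 256-bit customer-provided key (SSE-C) that was supplied when the
	// multipart upload was initiated; the header values are base64 encoded, and
	// the MD5 digest lets Amazon S3 verify the key arrived intact.
	key := make([]byte, 32) // placeholder: reuse the real key from CreateMultipartUpload
	keyMD5 := md5.Sum(key)

	part := []byte("part one payload") // placeholder part data

	out, err := client.UploadPart(ctx, &s3.UploadPartInput{
		Bucket:               aws.String("amzn-s3-demo-bucket"),       // placeholder
		Key:                  aws.String("large-object"),              // placeholder
		UploadId:             aws.String("upload-id-from-create-mpu"), // placeholder
		PartNumber:           aws.Int32(1), // plain int32 on older module versions
		Body:                 bytes.NewReader(part),
		SSECustomerAlgorithm: aws.String("AES256"),
		SSECustomerKey:       aws.String(base64.StdEncoding.EncodeToString(key)),
		SSECustomerKeyMD5:    aws.String(base64.StdEncoding.EncodeToString(keyMD5[:])),
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("uploaded part 1, ETag %s", aws.ToString(out.ETag))
}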
// -// UploadPart has the following special errors: -// - Code: NoSuchUpload -// - Cause: The specified multipart upload does not exist. The upload ID might -// be invalid, or the multipart upload might have been aborted or completed. +// For more information, see Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html) +// in the Amazon S3 User Guide. Special errors +// - Error Code: NoSuchUpload +// - Description: The specified multipart upload does not exist. The upload ID +// might be invalid, or the multipart upload might have been aborted or completed. // - HTTP Status Code: 404 Not Found // - SOAP Fault Code Prefix: Client // -// The following operations are related to UploadPart : +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket_name.s3express-az_id.region.amazonaws.com . The following operations are +// related to UploadPart : // - CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) // - CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) // - AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) @@ -104,16 +126,26 @@ func (c *Client) UploadPart(ctx context.Context, params *UploadPartInput, optFns type UploadPartInput struct { - // The name of the bucket to which the multipart upload was initiated. When using - // this action with an access point, you must direct requests to the access point - // hostname. The access point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. When you use this action with Amazon S3 on - // Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on - // Outposts hostname takes the form + // The name of the bucket to which the multipart upload was initiated. Directory + // buckets - When you use this operation with a directory bucket, you must use + // virtual-hosted-style requests in the format + // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not + // supported. Directory bucket names must be unique in the chosen Availability + // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for + // example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 ). For information about bucket + // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. Access points - When you use this action with an + // access point, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When using the access point ARN, + // you must direct requests to the access point hostname. The access point hostname + // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. 
For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. Access points and Object Lambda access points are + // not supported by directory buckets. S3 on Outposts - When you use this action + // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts + // hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you // use this action with S3 on Outposts through the Amazon Web Services SDKs, you // provide the Outposts access point ARN in place of the bucket name. For more @@ -142,12 +174,12 @@ type UploadPartInput struct { // Object data. Body io.Reader - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not using - // the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the - // HTTP status code 400 Bad Request . For more information, see Checking object - // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Indicates the algorithm used to create the checksum for the object when you use + // the SDK. This header will not provide any additional functionality if you don't + // use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request . For more information, see + // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 // ignores any provided ChecksumAlgorithm parameter. This checksum algorithm must // be the same for all parts and it match the checksum value supplied in the @@ -188,25 +220,27 @@ type UploadPartInput struct { // The base64-encoded 128-bit MD5 digest of the part data. This parameter is // auto-populated when using the command from the CLI. This parameter is required - // if object lock parameters are specified. + // if object lock parameters are specified. This functionality is not supported for + // directory buckets. ContentMD5 *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string // Confirms that the requester knows that they will be charged for the request. // Bucket owners need not specify this parameter in their requests. If either the - // source or destination Amazon S3 bucket has Requester Pays enabled, the requester - // will pay for corresponding charges to copy the object. For information about + // source or destination S3 bucket has Requester Pays enabled, the requester will + // pay for corresponding charges to copy the object. 
For information about // downloading objects from Requester Pays buckets, see Downloading Objects in // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. RequestPayer types.RequestPayer - // Specifies the algorithm to use to when encrypting the object (for example, - // AES256). + // Specifies the algorithm to use when encrypting the object (for example, + // AES256). This functionality is not supported for directory buckets. SSECustomerAlgorithm *string // Specifies the customer-provided encryption key for Amazon S3 to use in @@ -214,12 +248,14 @@ type UploadPartInput struct { // discarded; Amazon S3 does not store the encryption key. The key must be // appropriate for use with the algorithm specified in the // x-amz-server-side-encryption-customer-algorithm header . This must be the same - // encryption key specified in the initiate multipart upload request. + // encryption key specified in the initiate multipart upload request. This + // functionality is not supported for directory buckets. SSECustomerKey *string // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure that the - // encryption key was transmitted without error. + // encryption key was transmitted without error. This functionality is not + // supported for directory buckets. SSECustomerKeyMD5 *string noSmithyDocumentSerde @@ -234,34 +270,43 @@ func (in *UploadPartInput) bindEndpointParams(p *EndpointParameters) { type UploadPartOutput struct { // Indicates whether the multipart upload uses an S3 Bucket Key for server-side - // encryption with Key Management Service (KMS) keys (SSE-KMS). + // encryption with Key Management Service (KMS) keys (SSE-KMS). This functionality + // is not supported for directory buckets. BucketKeyEnabled *bool // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this may not - // be a checksum value of the object. For more information about how checksums are - // calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use an API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. Instead, it's a calculation based on the + // checksum values of each individual part. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumCRC32 *string // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this may not - // be a checksum value of the object. For more information about how checksums are - // calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. 
When you use an API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. Instead, it's a calculation based on the + // checksum values of each individual part. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumCRC32C *string // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this may not - // be a checksum value of the object. For more information about how checksums are - // calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use the API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. Instead, it's a calculation based on the + // checksum values of each individual part. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumSHA1 *string // The base64-encoded, 256-bit SHA-256 digest of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this may not - // be a checksum value of the object. For more information about how checksums are - // calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use an API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. Instead, it's a calculation based on the + // checksum values of each individual part. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumSHA256 *string @@ -269,25 +314,29 @@ type UploadPartOutput struct { ETag *string // If present, indicates that the requester was successfully charged for the - // request. + // request. This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // If server-side encryption with a customer-provided encryption key was - // requested, the response will include this header confirming the encryption - // algorithm used. + // requested, the response will include this header to confirm the encryption + // algorithm that's used. This functionality is not supported for directory + // buckets. SSECustomerAlgorithm *string // If server-side encryption with a customer-provided encryption key was - // requested, the response will include this header to provide round-trip message - // integrity verification of the customer-provided encryption key. 
+ // requested, the response will include this header to provide the round-trip + // message integrity verification of the customer-provided encryption key. This + // functionality is not supported for directory buckets. SSECustomerKeyMD5 *string - // If present, specifies the ID of the Key Management Service (KMS) symmetric - // encryption customer managed key was used for the object. + // If present, indicates the ID of the Key Management Service (KMS) symmetric + // encryption customer managed key that was used for the object. This functionality + // is not supported for directory buckets. SSEKMSKeyId *string - // The server-side encryption algorithm used when storing this object in Amazon S3 - // (for example, AES256 , aws:kms ). + // The server-side encryption algorithm used when you store this object in Amazon + // S3 (for example, AES256 , aws:kms ). For directory buckets, only server-side + // encryption with Amazon S3 managed keys (SSE-S3) ( AES256 ) is supported. ServerSideEncryption types.ServerSideEncryption // Metadata pertaining to the operation's result. @@ -351,6 +400,9 @@ func (c *Client) addOperationUploadPartMiddlewares(stack *middleware.Stack, opti if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = addOpUploadPartValidationMiddleware(stack); err != nil { return err } diff --git a/service/s3/api_op_UploadPartCopy.go b/service/s3/api_op_UploadPartCopy.go index 195e83fb454..1d48a7beaa1 100644 --- a/service/s3/api_op_UploadPartCopy.go +++ b/service/s3/api_op_UploadPartCopy.go @@ -10,72 +10,92 @@ import ( s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" "time" ) -// Uploads a part by copying data from an existing object as data source. You -// specify the data source by adding the request header x-amz-copy-source in your -// request and a byte range by adding the request header x-amz-copy-source-range -// in your request. For information about maximum and minimum part sizes and other -// multipart upload specifications, see Multipart upload limits (https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html) -// in the Amazon S3 User Guide. Instead of using an existing object as part data, -// you might use the UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) -// action and provide data in your request. You must initiate a multipart upload -// before you can upload any part. In response to your initiate request. Amazon S3 -// returns a unique identifier, the upload ID, that you must include in your upload -// part request. For more information about using the UploadPartCopy operation, -// see the following: -// - For conceptual information about multipart uploads, see Uploading Objects -// Using Multipart Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html) +// Uploads a part by copying data from an existing object as data source. To +// specify the data source, you add the request header x-amz-copy-source in your +// request. To specify a byte range, you add the request header +// x-amz-copy-source-range in your request. 
For information about maximum and +// minimum part sizes and other multipart upload specifications, see Multipart +// upload limits (https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html) +// in the Amazon S3 User Guide. Instead of copying data from an existing object as +// part data, you might use the UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// action to upload new data as a part of an object in your request. You must +// initiate a multipart upload before you can upload any part. In response to your +// initiate request, Amazon S3 returns the upload ID, a unique identifier that you +// must include in your upload part request. For conceptual information about +// multipart uploads, see Uploading Objects Using Multipart Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html) +// in the Amazon S3 User Guide. For information about copying objects using a +// single atomic action vs. a multipart upload, see Operations on Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectOperations.html) +// in the Amazon S3 User Guide. Directory buckets - For directory buckets, you must +// make requests for this API operation to the Zonal endpoint. These endpoints +// support virtual-hosted-style requests in the format +// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style +// requests are not supported. For more information, see Regional and Zonal +// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. Authentication and authorization All UploadPartCopy +// requests must be authenticated and signed by using IAM credentials (access key +// ID and secret access key for the IAM identities). All headers with the x-amz- +// prefix, including x-amz-copy-source , must be signed. For more information, see +// REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html) +// . Directory buckets - You must use IAM credentials to authenticate and authorize +// your access to the UploadPartCopy API operation, instead of using the temporary +// security credentials through the CreateSession API operation. Amazon Web +// Services CLI or SDKs handles authentication and authorization on your behalf. +// Permissions You must have READ access to the source object and WRITE access to +// the destination bucket. +// - General purpose bucket permissions - You must have the permissions in a +// policy based on the bucket types of your source bucket and destination bucket in +// an UploadPartCopy operation. +// - If the source object is in a general purpose bucket, you must have the +// s3:GetObject permission to read the source object that is being copied. +// - If the destination bucket is a general purpose bucket, you must have the +// s3:PubObject permission to write the object copy to the destination bucket. +// For information about permissions required to use the multipart upload API, see +// Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) // in the Amazon S3 User Guide. 
-// - For information about permissions required to use the multipart upload API, -// see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) +// - Directory bucket permissions - You must have permissions in a bucket policy +// or an IAM identity-based policy based on the source and destination bucket types +// in an UploadPartCopy operation. +// - If the source object that you want to copy is in a directory bucket, you +// must have the s3express:CreateSession permission in the Action element of a +// policy to read the object . By default, the session is in the ReadWrite mode. +// If you want to restrict the access, you can explicitly set the +// s3express:SessionMode condition key to ReadOnly on the copy source bucket. +// - If the copy destination is a directory bucket, you must have the +// s3express:CreateSession permission in the Action element of a policy to write +// the object to the destination. The s3express:SessionMode condition key cannot +// be set to ReadOnly on the copy destination. For example policies, see Example +// bucket policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html) +// and Amazon Web Services Identity and Access Management (IAM) identity-based +// policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html) // in the Amazon S3 User Guide. -// - For information about copying objects using a single atomic action vs. a -// multipart upload, see Operations on Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectOperations.html) -// in the Amazon S3 User Guide. -// - For information about using server-side encryption with customer-provided -// encryption keys with the UploadPartCopy operation, see CopyObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html) +// +// Encryption +// - General purpose buckets - For information about using server-side +// encryption with customer-provided encryption keys with the UploadPartCopy +// operation, see CopyObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html) // and UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) // . +// - Directory buckets - For directory buckets, only server-side encryption with +// Amazon S3 managed keys (SSE-S3) ( AES256 ) is supported. // -// Note the following additional considerations about the request headers -// x-amz-copy-source-if-match , x-amz-copy-source-if-none-match , -// x-amz-copy-source-if-unmodified-since , and x-amz-copy-source-if-modified-since -// : -// - Consideration 1 - If both of the x-amz-copy-source-if-match and -// x-amz-copy-source-if-unmodified-since headers are present in the request as -// follows: x-amz-copy-source-if-match condition evaluates to true , and; -// x-amz-copy-source-if-unmodified-since condition evaluates to false ; Amazon S3 -// returns 200 OK and copies the data. -// - Consideration 2 - If both of the x-amz-copy-source-if-none-match and -// x-amz-copy-source-if-modified-since headers are present in the request as -// follows: x-amz-copy-source-if-none-match condition evaluates to false , and; -// x-amz-copy-source-if-modified-since condition evaluates to true ; Amazon S3 -// returns 412 Precondition Failed response code. -// -// Versioning If your bucket has versioning enabled, you could have multiple -// versions of the same object. 
By default, x-amz-copy-source identifies the -// current version of the object to copy. If the current version is a delete marker -// and you don't specify a versionId in the x-amz-copy-source , Amazon S3 returns a -// 404 error, because the object does not exist. If you specify versionId in the -// x-amz-copy-source and the versionId is a delete marker, Amazon S3 returns an -// HTTP 400 error, because you are not allowed to specify a delete marker as a -// version for the x-amz-copy-source . You can optionally specify a specific -// version of the source object to copy by adding the versionId subresource as -// shown in the following example: x-amz-copy-source: -// /bucket/object?versionId=version id Special errors -// - Code: NoSuchUpload -// - Cause: The specified multipart upload does not exist. The upload ID might -// be invalid, or the multipart upload might have been aborted or completed. +// Special errors +// - Error Code: NoSuchUpload +// - Description: The specified multipart upload does not exist. The upload ID +// might be invalid, or the multipart upload might have been aborted or completed. // - HTTP Status Code: 404 Not Found -// - Code: InvalidRequest -// - Cause: The specified copy source is not supported as a byte-range copy -// source. +// - Error Code: InvalidRequest +// - Description: The specified copy source is not supported as a byte-range +// copy source. // - HTTP Status Code: 400 Bad Request // -// The following operations are related to UploadPartCopy : +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket_name.s3express-az_id.region.amazonaws.com . The following operations are +// related to UploadPartCopy : // - CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) // - UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) // - CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) @@ -99,15 +119,25 @@ func (c *Client) UploadPartCopy(ctx context.Context, params *UploadPartCopyInput type UploadPartCopyInput struct { - // The bucket name. When using this action with an access point, you must direct - // requests to the access point hostname. The access point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. When you use this action with Amazon S3 on - // Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on - // Outposts hostname takes the form + // The bucket name. Directory buckets - When you use this operation with a + // directory bucket, you must use virtual-hosted-style requests in the format + // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not + // supported. Directory bucket names must be unique in the chosen Availability + // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for + // example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 ). 
For information about bucket + // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. Access points - When you use this action with an + // access point, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When using the access point ARN, + // you must direct requests to the access point hostname. The access point hostname + // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. Access points and Object Lambda access points are + // not supported by directory buckets. S3 on Outposts - When you use this action + // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts + // hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you // use this action with S3 on Outposts through the Amazon Web Services SDKs, you // provide the Outposts access point ARN in place of the bucket name. For more @@ -132,20 +162,29 @@ type UploadPartCopyInput struct { // reports/january.pdf through access point my-access-point owned by account // 123456789012 in Region us-west-2 , use the URL encoding of // arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf - // . The value must be URL encoded. Amazon S3 supports copy operations using access - // points only when the source and destination buckets are in the same Amazon Web - // Services Region. Alternatively, for objects accessed through Amazon S3 on - // Outposts, specify the ARN of the object as accessed in the format - // arn:aws:s3-outposts:::outpost//object/ . For example, to copy the object - // reports/january.pdf through outpost my-outpost owned by account 123456789012 - // in Region us-west-2 , use the URL encoding of + // . The value must be URL encoded. + // - Amazon S3 supports copy operations using Access points only when the source + // and destination buckets are in the same Amazon Web Services Region. + // - Access points are not supported by directory buckets. Alternatively, for + // objects accessed through Amazon S3 on Outposts, specify the ARN of the object as + // accessed in the format arn:aws:s3-outposts:::outpost//object/ . For example, + // to copy the object reports/january.pdf through outpost my-outpost owned by + // account 123456789012 in Region us-west-2 , use the URL encoding of // arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf // . The value must be URL-encoded. - // To copy a specific version of an object, append ?versionId= to the value (for - // example, - // awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893 - // ). If you don't specify a version ID, Amazon S3 copies the latest version of the - // source object. + // If your bucket has versioning enabled, you could have multiple versions of the + // same object. By default, x-amz-copy-source identifies the current version of + // the source object to copy. 
To copy a specific version of the source object to + // copy, append ?versionId= to the x-amz-copy-source request header (for example, + // x-amz-copy-source: + // /awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893 + // ). If the current version is a delete marker and you don't specify a versionId + // in the x-amz-copy-source request header, Amazon S3 returns a 404 Not Found + // error, because the object does not exist. If you specify versionId in the + // x-amz-copy-source and the versionId is a delete marker, Amazon S3 returns an + // HTTP 400 Bad Request error, because you are not allowed to specify a delete + // marker as a version for the x-amz-copy-source . Directory buckets - S3 + // Versioning isn't enabled and supported for directory buckets. // // This member is required. CopySource *string @@ -166,16 +205,34 @@ type UploadPartCopyInput struct { // This member is required. UploadId *string - // Copies the object if its entity tag (ETag) matches the specified tag. + // Copies the object if its entity tag (ETag) matches the specified tag. If both + // of the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since + // headers are present in the request as follows: x-amz-copy-source-if-match + // condition evaluates to true , and; x-amz-copy-source-if-unmodified-since + // condition evaluates to false ; Amazon S3 returns 200 OK and copies the data. CopySourceIfMatch *string - // Copies the object if it has been modified since the specified time. + // Copies the object if it has been modified since the specified time. If both of + // the x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since + // headers are present in the request as follows: x-amz-copy-source-if-none-match + // condition evaluates to false , and; x-amz-copy-source-if-modified-since + // condition evaluates to true ; Amazon S3 returns 412 Precondition Failed + // response code. CopySourceIfModifiedSince *time.Time - // Copies the object if its entity tag (ETag) is different than the specified ETag. + // Copies the object if its entity tag (ETag) is different than the specified + // ETag. If both of the x-amz-copy-source-if-none-match and + // x-amz-copy-source-if-modified-since headers are present in the request as + // follows: x-amz-copy-source-if-none-match condition evaluates to false , and; + // x-amz-copy-source-if-modified-since condition evaluates to true ; Amazon S3 + // returns 412 Precondition Failed response code. CopySourceIfNoneMatch *string - // Copies the object if it hasn't been modified since the specified time. + // Copies the object if it hasn't been modified since the specified time. If both + // of the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since + // headers are present in the request as follows: x-amz-copy-source-if-match + // condition evaluates to true , and; x-amz-copy-source-if-unmodified-since + // condition evaluates to false ; Amazon S3 returns 200 OK and copies the data. CopySourceIfUnmodifiedSince *time.Time // The range of bytes to copy from the source object. The range value must use the @@ -186,40 +243,45 @@ type UploadPartCopyInput struct { CopySourceRange *string // Specifies the algorithm to use when decrypting the source object (for example, - // AES256). + // AES256 ). This functionality is not supported when the source object is in a + // directory bucket. 
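The x-amz-copy-source-if-* evaluation rules above can be illustrated with a small sketch that copies a part only while the source object still carries a known ETag, treating a 412 Precondition Failed response as the signal that the source has changed. The bucket and key names are placeholders and the helper itself is hypothetical.

package main

import (
	"context"
	"errors"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/smithy-go"
)

// copyPartIfUnchanged copies the first 5 MiB of the source object into part 1
// of an existing multipart upload, but only if the source object's ETag still
// matches a value captured earlier (for example from HeadObject).
func copyPartIfUnchanged(ctx context.Context, client *s3.Client, uploadID, knownETag string) error {
	_, err := client.UploadPartCopy(ctx, &s3.UploadPartCopyInput{
		Bucket:            aws.String("amzn-s3-demo-bucket"), // placeholder destination
		Key:               aws.String("assembled-object"),    // placeholder destination key
		UploadId:          aws.String(uploadID),
		PartNumber:        aws.Int32(1), // plain int32 on older module versions
		CopySource:        aws.String("amzn-s3-demo-source-bucket/reports/january.pdf"), // placeholder, URL encoded
		CopySourceRange:   aws.String("bytes=0-5242879"),
		CopySourceIfMatch: aws.String(knownETag),
	})

	var apiErr smithy.APIError
	if errors.As(err, &apiErr) && apiErr.ErrorCode() == "PreconditionFailed" {
		// x-amz-copy-source-if-match evaluated to false: the source object
		// changed since its ETag was recorded, so Amazon S3 returned
		// 412 Precondition Failed instead of copying the data.
		log.Printf("source object changed, part not copied: %v", apiErr)
	}
	return err
}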
CopySourceSSECustomerAlgorithm *string // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt // the source object. The encryption key provided in this header must be one that - // was used when the source object was created. + // was used when the source object was created. This functionality is not supported + // when the source object is in a directory bucket. CopySourceSSECustomerKey *string // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure that the - // encryption key was transmitted without error. + // encryption key was transmitted without error. This functionality is not + // supported when the source object is in a directory bucket. CopySourceSSECustomerKeyMD5 *string - // The account ID of the expected destination bucket owner. If the destination - // bucket is owned by a different account, the request fails with the HTTP status - // code 403 Forbidden (access denied). + // The account ID of the expected destination bucket owner. If the account ID that + // you provide does not match the actual owner of the destination bucket, the + // request fails with the HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string - // The account ID of the expected source bucket owner. If the source bucket is - // owned by a different account, the request fails with the HTTP status code 403 - // Forbidden (access denied). + // The account ID of the expected source bucket owner. If the account ID that you + // provide does not match the actual owner of the source bucket, the request fails + // with the HTTP status code 403 Forbidden (access denied). ExpectedSourceBucketOwner *string // Confirms that the requester knows that they will be charged for the request. // Bucket owners need not specify this parameter in their requests. If either the - // source or destination Amazon S3 bucket has Requester Pays enabled, the requester - // will pay for corresponding charges to copy the object. For information about + // source or destination S3 bucket has Requester Pays enabled, the requester will + // pay for corresponding charges to copy the object. For information about // downloading objects from Requester Pays buckets, see Downloading Objects in // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. RequestPayer types.RequestPayer - // Specifies the algorithm to use to when encrypting the object (for example, - // AES256). + // Specifies the algorithm to use when encrypting the object (for example, + // AES256). This functionality is not supported when the destination bucket is a + // directory bucket. SSECustomerAlgorithm *string // Specifies the customer-provided encryption key for Amazon S3 to use in @@ -227,12 +289,15 @@ type UploadPartCopyInput struct { // discarded; Amazon S3 does not store the encryption key. The key must be // appropriate for use with the algorithm specified in the // x-amz-server-side-encryption-customer-algorithm header. This must be the same - // encryption key specified in the initiate multipart upload request. + // encryption key specified in the initiate multipart upload request. This + // functionality is not supported when the destination bucket is a directory + // bucket. 
SSECustomerKey *string // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure that the - // encryption key was transmitted without error. + // encryption key was transmitted without error. This functionality is not + // supported when the destination bucket is a directory bucket. SSECustomerKeyMD5 *string noSmithyDocumentSerde @@ -240,42 +305,48 @@ type UploadPartCopyInput struct { func (in *UploadPartCopyInput) bindEndpointParams(p *EndpointParameters) { p.Bucket = in.Bucket - + p.DisableS3ExpressSessionAuth = ptr.Bool(true) } type UploadPartCopyOutput struct { // Indicates whether the multipart upload uses an S3 Bucket Key for server-side - // encryption with Key Management Service (KMS) keys (SSE-KMS). + // encryption with Key Management Service (KMS) keys (SSE-KMS). This functionality + // is not supported for directory buckets. BucketKeyEnabled *bool // Container for all response elements. CopyPartResult *types.CopyPartResult // The version of the source object that was copied, if you have enabled - // versioning on the source bucket. + // versioning on the source bucket. This functionality is not supported when the + // source object is in a directory bucket. CopySourceVersionId *string // If present, indicates that the requester was successfully charged for the - // request. + // request. This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // If server-side encryption with a customer-provided encryption key was - // requested, the response will include this header confirming the encryption - // algorithm used. + // requested, the response will include this header to confirm the encryption + // algorithm that's used. This functionality is not supported for directory + // buckets. SSECustomerAlgorithm *string // If server-side encryption with a customer-provided encryption key was - // requested, the response will include this header to provide round-trip message - // integrity verification of the customer-provided encryption key. + // requested, the response will include this header to provide the round-trip + // message integrity verification of the customer-provided encryption key. This + // functionality is not supported for directory buckets. SSECustomerKeyMD5 *string - // If present, specifies the ID of the Key Management Service (KMS) symmetric - // encryption customer managed key that was used for the object. + // If present, indicates the ID of the Key Management Service (KMS) symmetric + // encryption customer managed key that was used for the object. This functionality + // is not supported for directory buckets. SSEKMSKeyId *string - // The server-side encryption algorithm used when storing this object in Amazon S3 - // (for example, AES256 , aws:kms ). + // The server-side encryption algorithm used when you store this object in Amazon + // S3 (for example, AES256 , aws:kms ). For directory buckets, only server-side + // encryption with Amazon S3 managed keys (SSE-S3) ( AES256 ) is supported. ServerSideEncryption types.ServerSideEncryption // Metadata pertaining to the operation's result. 
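Pulling the UploadPartCopy pieces above together, the following hypothetical helper copies one byte range of a (possibly versioned) source object into a part of an in-progress multipart upload and returns the entry that CompleteMultipartUpload expects. Bucket and key values are supplied by the caller, and PartNumber and the CompletedPart part number may be plain int32 values rather than pointers on older module versions.

package main

import (
	"context"
	"fmt"
	"net/url"
	"strings"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// copyRangeAsPart copies the byte range [firstByte, lastByte] of a source
// object into one part of an in-progress multipart upload and returns the
// CompletedPart entry needed later by CompleteMultipartUpload.
func copyRangeAsPart(ctx context.Context, client *s3.Client,
	destBucket, destKey, uploadID string,
	srcBucket, srcKey, srcVersionID string,
	partNumber int32, firstByte, lastByte int64) (types.CompletedPart, error) {

	// x-amz-copy-source is "source-bucket/source-key", URL encoded. Each key
	// segment is escaped individually so the literal '/' separators survive.
	segments := strings.Split(srcKey, "/")
	for i, seg := range segments {
		segments[i] = url.PathEscape(seg)
	}
	copySource := srcBucket + "/" + strings.Join(segments, "/")
	if srcVersionID != "" {
		// Appending ?versionId= copies a specific version of the source
		// object instead of the current version.
		copySource += "?versionId=" + srcVersionID
	}

	out, err := client.UploadPartCopy(ctx, &s3.UploadPartCopyInput{
		Bucket:          aws.String(destBucket),
		Key:             aws.String(destKey),
		UploadId:        aws.String(uploadID),
		PartNumber:      aws.Int32(partNumber), // plain int32 on older module versions
		CopySource:      aws.String(copySource),
		CopySourceRange: aws.String(fmt.Sprintf("bytes=%d-%d", firstByte, lastByte)),
	})
	if err != nil {
		return types.CompletedPart{}, err
	}

	// The copied part's ETag is reported in CopyPartResult; pair it with the
	// part number when calling CompleteMultipartUpload.
	return types.CompletedPart{
		ETag:       out.CopyPartResult.ETag,
		PartNumber: aws.Int32(partNumber), // plain int32 on older module versions
	}, nil
}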
@@ -339,6 +410,9 @@ func (c *Client) addOperationUploadPartCopyMiddlewares(stack *middleware.Stack, if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = addOpUploadPartCopyValidationMiddleware(stack); err != nil { return err } diff --git a/service/s3/api_op_WriteGetObjectResponse.go b/service/s3/api_op_WriteGetObjectResponse.go index 2b9560c91b7..ac90ff7032a 100644 --- a/service/s3/api_op_WriteGetObjectResponse.go +++ b/service/s3/api_op_WriteGetObjectResponse.go @@ -18,9 +18,10 @@ import ( "time" ) -// Passes transformed objects to a GetObject operation when using Object Lambda -// access points. For information about Object Lambda access points, see -// Transforming objects with Object Lambda access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/transforming-objects.html) +// This operation is not supported by directory buckets. Passes transformed +// objects to a GetObject operation when using Object Lambda access points. For +// information about Object Lambda access points, see Transforming objects with +// Object Lambda access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/transforming-objects.html) // in the Amazon S3 User Guide. This operation supports metadata that can be // returned by GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) // , in addition to RequestRoute , RequestToken , StatusCode , ErrorCode , and @@ -220,7 +221,7 @@ type WriteGetObjectResponseInput struct { ReplicationStatus types.ReplicationStatus // If present, indicates that the requester was successfully charged for the - // request. + // request. This functionality is not supported for directory buckets. 
RequestCharged types.RequestCharged // Provides information about object restoration operation and expiration time of @@ -349,6 +350,9 @@ func (c *Client) addOperationWriteGetObjectResponseMiddlewares(stack *middleware if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } if err = addEndpointPrefix_opWriteGetObjectResponseMiddleware(stack); err != nil { return err } diff --git a/service/s3/bucket_context.go b/service/s3/bucket_context.go new file mode 100644 index 00000000000..860af056aa2 --- /dev/null +++ b/service/s3/bucket_context.go @@ -0,0 +1,47 @@ +package s3 + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/smithy-go/middleware" +) + +// putBucketContextMiddleware stores the input bucket name within the request context (if +// present) which is required for a variety of custom S3 behaviors +type putBucketContextMiddleware struct{} + +func (*putBucketContextMiddleware) ID() string { + return "putBucketContext" +} + +func (m *putBucketContextMiddleware) HandleSerialize( + ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, +) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + if bucket, ok := m.bucketFromInput(in.Parameters); ok { + ctx = customizations.SetBucket(ctx, bucket) + } + return next.HandleSerialize(ctx, in) +} + +func (m *putBucketContextMiddleware) bucketFromInput(params interface{}) (string, bool) { + v, ok := params.(bucketer) + if !ok { + return "", false + } + + return v.bucket() +} + +func addPutBucketContextMiddleware(stack *middleware.Stack) error { + // This is essentially a post-Initialize task - only run it once the input + // has received all modifications from that phase. Therefore we add it as + // an early Serialize step. 
+ // + // FUTURE: it would be nice to have explicit phases that only we as SDK + // authors can hook into (such as between phases like this really should + // be) + return stack.Serialize.Add(&putBucketContextMiddleware{}, middleware.Before) +} diff --git a/service/s3/create_mpu_checksum.go b/service/s3/create_mpu_checksum.go new file mode 100644 index 00000000000..5803b9e45ae --- /dev/null +++ b/service/s3/create_mpu_checksum.go @@ -0,0 +1,36 @@ +package s3 + +import ( + "context" + "fmt" + + internalcontext "github.com/aws/aws-sdk-go-v2/internal/context" + "github.com/aws/smithy-go/middleware" +) + +// backfills checksum algorithm onto the context for CreateMultipart upload so +// transfer manager can set a checksum header on the request accordingly for +// s3express requests +type setCreateMPUChecksumAlgorithm struct{} + +func (*setCreateMPUChecksumAlgorithm) ID() string { + return "setCreateMPUChecksumAlgorithm" +} + +func (*setCreateMPUChecksumAlgorithm) HandleSerialize( + ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, +) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*CreateMultipartUploadInput) + if !ok { + return out, metadata, fmt.Errorf("unexpected input type %T", in.Parameters) + } + + ctx = internalcontext.SetChecksumInputAlgorithm(ctx, string(input.ChecksumAlgorithm)) + return next.HandleSerialize(ctx, in) +} + +func addSetCreateMPUChecksumAlgorithm(s *middleware.Stack) error { + return s.Serialize.Add(&setCreateMPUChecksumAlgorithm{}, middleware.Before) +} diff --git a/service/s3/deserializers.go b/service/s3/deserializers.go index 2e75f800dd2..2be5df30ffa 100644 --- a/service/s3/deserializers.go +++ b/service/s3/deserializers.go @@ -943,6 +943,148 @@ func awsRestxml_deserializeOpDocumentCreateMultipartUploadOutput(v **CreateMulti return nil } +type awsRestxml_deserializeOpCreateSession struct { +} + +func (*awsRestxml_deserializeOpCreateSession) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpCreateSession) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorCreateSession(response, &metadata) + } + output := &CreateSessionOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeOpDocumentCreateSessionOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, 
&smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorCreateSession(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + case strings.EqualFold("NoSuchBucket", errorCode): + return awsRestxml_deserializeErrorNoSuchBucket(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpDocumentCreateSessionOutput(v **CreateSessionOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *CreateSessionOutput + if *v == nil { + sv = &CreateSessionOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Credentials", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentSessionCredentials(&sv.Credentials, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + type awsRestxml_deserializeOpDeleteBucket struct { } @@ -6728,10 +6870,9 @@ func (m *awsRestxml_deserializeOpHeadBucket) HandleDeserialize(ctx context.Conte output := &HeadBucketOutput{} out.Result = output - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), - } + err = awsRestxml_deserializeOpHttpBindingsHeadBucketOutput(output, response) + if err != nil { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} } return out, metadata, err @@ -6780,6 +6921,38 @@ func awsRestxml_deserializeOpErrorHeadBucket(response *smithyhttp.Response, meta } } +func awsRestxml_deserializeOpHttpBindingsHeadBucketOutput(v *HeadBucketOutput, response *smithyhttp.Response) error { + if v == nil { + return fmt.Errorf("unsupported deserialization for nil %T", v) + } + + if headerValues := response.Header.Values("x-amz-access-point-alias"); len(headerValues) != 0 { + headerValues[0] = 
strings.TrimSpace(headerValues[0]) + vv, err := strconv.ParseBool(headerValues[0]) + if err != nil { + return err + } + v.AccessPointAlias = ptr.Bool(vv) + } + + if headerValues := response.Header.Values("x-amz-bucket-location-name"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.BucketLocationName = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-bucket-location-type"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.BucketLocationType = types.LocationType(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-bucket-region"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.BucketRegion = ptr.String(headerValues[0]) + } + + return nil +} + type awsRestxml_deserializeOpHeadObject struct { } @@ -7941,6 +8114,158 @@ func awsRestxml_deserializeOpDocumentListBucketsOutput(v **ListBucketsOutput, de return nil } +type awsRestxml_deserializeOpListDirectoryBuckets struct { +} + +func (*awsRestxml_deserializeOpListDirectoryBuckets) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpListDirectoryBuckets) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorListDirectoryBuckets(response, &metadata) + } + output := &ListDirectoryBucketsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeOpDocumentListDirectoryBucketsOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorListDirectoryBuckets(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + 
s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpDocumentListDirectoryBucketsOutput(v **ListDirectoryBucketsOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *ListDirectoryBucketsOutput + if *v == nil { + sv = &ListDirectoryBucketsOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Buckets", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentBuckets(&sv.Buckets, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("ContinuationToken", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ContinuationToken = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + type awsRestxml_deserializeOpListMultipartUploads struct { } @@ -21203,6 +21528,98 @@ func awsRestxml_deserializeDocumentServerSideEncryptionRulesUnwrapped(v *[]types *v = sv return nil } +func awsRestxml_deserializeDocumentSessionCredentials(v **types.SessionCredentials, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.SessionCredentials + if *v == nil { + sv = &types.SessionCredentials{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("AccessKeyId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.AccessKeyId = ptr.String(xtv) + } + + case strings.EqualFold("Expiration", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + t, err := smithytime.ParseDateTime(xtv) + if err != nil { + return err + } + sv.Expiration = ptr.Time(t) + } + + case strings.EqualFold("SecretAccessKey", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.SecretAccessKey = ptr.String(xtv) + } + + case strings.EqualFold("SessionToken", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.SessionToken = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + func 
awsRestxml_deserializeDocumentSimplePrefix(v **types.SimplePrefix, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) diff --git a/service/s3/endpoint_auth_resolver.go b/service/s3/endpoint_auth_resolver.go index c9945587452..6c97ae7eaae 100644 --- a/service/s3/endpoint_auth_resolver.go +++ b/service/s3/endpoint_auth_resolver.go @@ -23,6 +23,13 @@ func (r *endpointAuthResolver) ResolveAuthSchemes( return nil, err } + // canonicalize sigv4-s3express ID + for _, opt := range opts { + if opt.SchemeID == "sigv4-s3express" { + opt.SchemeID = "com.amazonaws.s3#sigv4express" + } + } + // a host of undocumented s3 operations can be done anonymously return append(opts, &smithyauth.Option{ SchemeID: smithyauth.SchemeIDAnonymous, diff --git a/service/s3/endpoints.go b/service/s3/endpoints.go index 4ff33428d96..fb70e7b3123 100644 --- a/service/s3/endpoints.go +++ b/service/s3/endpoints.go @@ -9,8 +9,10 @@ import ( "github.com/aws/aws-sdk-go-v2/aws" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" + internalcontext "github.com/aws/aws-sdk-go-v2/internal/context" "github.com/aws/aws-sdk-go-v2/internal/endpoints" "github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" internalendpoints "github.com/aws/aws-sdk-go-v2/service/s3/internal/endpoints" smithy "github.com/aws/smithy-go" smithyauth "github.com/aws/smithy-go/auth" @@ -338,135 +340,957 @@ type EndpointParameters struct { // // AWS::S3::UseArnRegion UseArnRegion *bool + + // Internal parameter to indicate whether S3Express operation should use control + // plane, (ex. CreateBucket) + // + // Parameter is required. + UseS3ExpressControlEndpoint *bool + + // Internal parameter to indicate whether S3Express session auth should be + // disabled + // + // Parameter is required. + DisableS3ExpressSessionAuth *bool +} + +// ValidateRequired validates required parameters are set. +func (p EndpointParameters) ValidateRequired() error { + if p.Accelerate == nil { + return fmt.Errorf("parameter Accelerate is required") + } + + if p.DisableMultiRegionAccessPoints == nil { + return fmt.Errorf("parameter DisableMultiRegionAccessPoints is required") + } + + if p.ForcePathStyle == nil { + return fmt.Errorf("parameter ForcePathStyle is required") + } + + if p.UseDualStack == nil { + return fmt.Errorf("parameter UseDualStack is required") + } + + if p.UseFIPS == nil { + return fmt.Errorf("parameter UseFIPS is required") + } + + if p.UseGlobalEndpoint == nil { + return fmt.Errorf("parameter UseGlobalEndpoint is required") + } + + return nil +} + +// WithDefaults returns a shallow copy of EndpointParameterswith default values +// applied to members where applicable. +func (p EndpointParameters) WithDefaults() EndpointParameters { + if p.Accelerate == nil { + p.Accelerate = ptr.Bool(false) + } + + if p.DisableMultiRegionAccessPoints == nil { + p.DisableMultiRegionAccessPoints = ptr.Bool(false) + } + + if p.ForcePathStyle == nil { + p.ForcePathStyle = ptr.Bool(false) + } + + if p.UseDualStack == nil { + p.UseDualStack = ptr.Bool(false) + } + + if p.UseFIPS == nil { + p.UseFIPS = ptr.Bool(false) + } + + if p.UseGlobalEndpoint == nil { + p.UseGlobalEndpoint = ptr.Bool(false) + } + return p } -// ValidateRequired validates required parameters are set. 
-func (p EndpointParameters) ValidateRequired() error { - if p.Accelerate == nil { - return fmt.Errorf("parameter Accelerate is required") - } +// EndpointResolverV2 provides the interface for resolving service endpoints. +type EndpointResolverV2 interface { + // ResolveEndpoint attempts to resolve the endpoint with the provided options, + // returning the endpoint if found. Otherwise an error is returned. + ResolveEndpoint(ctx context.Context, params EndpointParameters) ( + smithyendpoints.Endpoint, error, + ) +} + +// resolver provides the implementation for resolving endpoints. +type resolver struct{} + +func NewDefaultEndpointResolverV2() EndpointResolverV2 { + return &resolver{} +} + +// ResolveEndpoint attempts to resolve the endpoint with the provided options, +// returning the endpoint if found. Otherwise an error is returned. +func (r *resolver) ResolveEndpoint( + ctx context.Context, params EndpointParameters, +) ( + endpoint smithyendpoints.Endpoint, err error, +) { + params = params.WithDefaults() + if err = params.ValidateRequired(); err != nil { + return endpoint, fmt.Errorf("endpoint parameters are not valid, %w", err) + } + _UseFIPS := *params.UseFIPS + _UseDualStack := *params.UseDualStack + _ForcePathStyle := *params.ForcePathStyle + _Accelerate := *params.Accelerate + _UseGlobalEndpoint := *params.UseGlobalEndpoint + _DisableMultiRegionAccessPoints := *params.DisableMultiRegionAccessPoints + + if exprVal := params.Region; exprVal != nil { + _Region := *exprVal + _ = _Region + if _Accelerate == true { + if _UseFIPS == true { + return endpoint, fmt.Errorf("endpoint rule error, %s", "Accelerate cannot be used with FIPS") + } + } + if _UseDualStack == true { + if exprVal := params.Endpoint; exprVal != nil { + _Endpoint := *exprVal + _ = _Endpoint + return endpoint, fmt.Errorf("endpoint rule error, %s", "Cannot set dual-stack in combination with a custom endpoint.") + } + } + if exprVal := params.Endpoint; exprVal != nil { + _Endpoint := *exprVal + _ = _Endpoint + if _UseFIPS == true { + return endpoint, fmt.Errorf("endpoint rule error, %s", "A custom endpoint cannot be combined with FIPS") + } + } + if exprVal := params.Endpoint; exprVal != nil { + _Endpoint := *exprVal + _ = _Endpoint + if _Accelerate == true { + return endpoint, fmt.Errorf("endpoint rule error, %s", "A custom endpoint cannot be combined with S3 Accelerate") + } + } + if _UseFIPS == true { + if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil { + _partitionResult := *exprVal + _ = _partitionResult + if _partitionResult.Name == "aws-cn" { + return endpoint, fmt.Errorf("endpoint rule error, %s", "Partition does not support FIPS") + } + } + } + if exprVal := params.Bucket; exprVal != nil { + _Bucket := *exprVal + _ = _Bucket + if exprVal := rulesfn.SubString(_Bucket, 0, 6, true); exprVal != nil { + _bucketSuffix := *exprVal + _ = _bucketSuffix + if _bucketSuffix == "--x-s3" { + if _UseDualStack == true { + return endpoint, fmt.Errorf("endpoint rule error, %s", "S3Express does not support Dual-stack.") + } + if _Accelerate == true { + return endpoint, fmt.Errorf("endpoint rule error, %s", "S3Express does not support S3 Accelerate.") + } + if exprVal := params.Endpoint; exprVal != nil { + _Endpoint := *exprVal + _ = _Endpoint + if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil { + _url := *exprVal + _ = _url + if exprVal := params.DisableS3ExpressSessionAuth; exprVal != nil { + _DisableS3ExpressSessionAuth := *exprVal + _ = _DisableS3ExpressSessionAuth + if _DisableS3ExpressSessionAuth == true 
{ + if _url.IsIp == true { + _uri_encoded_bucket := rulesfn.URIEncode(_Bucket) + _ = _uri_encoded_bucket + uriString := func() string { + var out strings.Builder + out.WriteString(_url.Scheme) + out.WriteString("://") + out.WriteString(_url.Authority) + out.WriteString("/") + out.WriteString(_uri_encoded_bucket) + out.WriteString(_url.Path) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + if awsrulesfn.IsVirtualHostableS3Bucket(_Bucket, false) { + uriString := func() string { + var out strings.Builder + out.WriteString(_url.Scheme) + out.WriteString("://") + out.WriteString(_Bucket) + out.WriteString(".") + out.WriteString(_url.Authority) + out.WriteString(_url.Path) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "S3Express bucket name is not a valid virtual hostable name.") + } + } + if _url.IsIp == true { + _uri_encoded_bucket := rulesfn.URIEncode(_Bucket) + _ = _uri_encoded_bucket + uriString := func() string { + var out strings.Builder + out.WriteString(_url.Scheme) + out.WriteString("://") + out.WriteString(_url.Authority) + out.WriteString("/") + out.WriteString(_uri_encoded_bucket) + out.WriteString(_url.Path) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "sigv4-s3express", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + if awsrulesfn.IsVirtualHostableS3Bucket(_Bucket, false) { + uriString := func() string { + var out strings.Builder + out.WriteString(_url.Scheme) + 
out.WriteString("://") + out.WriteString(_Bucket) + out.WriteString(".") + out.WriteString(_url.Authority) + out.WriteString(_url.Path) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "sigv4-s3express", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "S3Express bucket name is not a valid virtual hostable name.") + } + } + if exprVal := params.UseS3ExpressControlEndpoint; exprVal != nil { + _UseS3ExpressControlEndpoint := *exprVal + _ = _UseS3ExpressControlEndpoint + if _UseS3ExpressControlEndpoint == true { + _uri_encoded_bucket := rulesfn.URIEncode(_Bucket) + _ = _uri_encoded_bucket + if !(params.Endpoint != nil) { + if _UseFIPS == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://s3express-control-fips.") + out.WriteString(_Region) + out.WriteString(".amazonaws.com/") + out.WriteString(_uri_encoded_bucket) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://s3express-control.") + out.WriteString(_Region) + out.WriteString(".amazonaws.com/") + out.WriteString(_uri_encoded_bucket) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + return endpoint, fmt.Errorf("Endpoint resolution failed. 
Invalid operation or environment input.") + } + } + if awsrulesfn.IsVirtualHostableS3Bucket(_Bucket, false) { + if exprVal := params.DisableS3ExpressSessionAuth; exprVal != nil { + _DisableS3ExpressSessionAuth := *exprVal + _ = _DisableS3ExpressSessionAuth + if _DisableS3ExpressSessionAuth == true { + if exprVal := rulesfn.SubString(_Bucket, 6, 14, true); exprVal != nil { + _s3expressAvailabilityZoneId := *exprVal + _ = _s3expressAvailabilityZoneId + if exprVal := rulesfn.SubString(_Bucket, 14, 16, true); exprVal != nil { + _s3expressAvailabilityZoneDelim := *exprVal + _ = _s3expressAvailabilityZoneDelim + if _s3expressAvailabilityZoneDelim == "--" { + if _UseFIPS == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-fips-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".amazonaws.com") + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".amazonaws.com") + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + if exprVal := rulesfn.SubString(_Bucket, 6, 15, true); exprVal != nil { + _s3expressAvailabilityZoneId := *exprVal + _ = _s3expressAvailabilityZoneId + if exprVal := rulesfn.SubString(_Bucket, 15, 17, true); exprVal != nil { + _s3expressAvailabilityZoneDelim := *exprVal + _ = _s3expressAvailabilityZoneDelim + if _s3expressAvailabilityZoneDelim == "--" { + if _UseFIPS == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-fips-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".amazonaws.com") + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return 
endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".amazonaws.com") + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "Unrecognized S3Express bucket name format.") + } + } + if exprVal := rulesfn.SubString(_Bucket, 6, 14, true); exprVal != nil { + _s3expressAvailabilityZoneId := *exprVal + _ = _s3expressAvailabilityZoneId + if exprVal := rulesfn.SubString(_Bucket, 14, 16, true); exprVal != nil { + _s3expressAvailabilityZoneDelim := *exprVal + _ = _s3expressAvailabilityZoneDelim + if _s3expressAvailabilityZoneDelim == "--" { + if _UseFIPS == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-fips-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".amazonaws.com") + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "sigv4-s3express", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".amazonaws.com") + 
return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "sigv4-s3express", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") - if p.DisableMultiRegionAccessPoints == nil { - return fmt.Errorf("parameter DisableMultiRegionAccessPoints is required") - } + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + if exprVal := rulesfn.SubString(_Bucket, 6, 15, true); exprVal != nil { + _s3expressAvailabilityZoneId := *exprVal + _ = _s3expressAvailabilityZoneId + if exprVal := rulesfn.SubString(_Bucket, 15, 17, true); exprVal != nil { + _s3expressAvailabilityZoneDelim := *exprVal + _ = _s3expressAvailabilityZoneDelim + if _s3expressAvailabilityZoneDelim == "--" { + if _UseFIPS == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-fips-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".amazonaws.com") + return out.String() + }() - if p.ForcePathStyle == nil { - return fmt.Errorf("parameter ForcePathStyle is required") - } + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } - if p.UseDualStack == nil { - return fmt.Errorf("parameter UseDualStack is required") - } + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "sigv4-s3express", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) - if p.UseFIPS == nil { - return fmt.Errorf("parameter UseFIPS is required") - } + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") - if p.UseGlobalEndpoint == nil { - return fmt.Errorf("parameter UseGlobalEndpoint is required") - } + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".amazonaws.com") + return out.String() + }() - return nil -} + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } -// WithDefaults returns a shallow copy of EndpointParameterswith default values -// applied to members where applicable. 
-func (p EndpointParameters) WithDefaults() EndpointParameters { - if p.Accelerate == nil { - p.Accelerate = ptr.Bool(false) - } + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "sigv4-s3express", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) - if p.DisableMultiRegionAccessPoints == nil { - p.DisableMultiRegionAccessPoints = ptr.Bool(false) - } + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") - if p.ForcePathStyle == nil { - p.ForcePathStyle = ptr.Bool(false) - } + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "Unrecognized S3Express bucket name format.") + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "S3Express bucket name is not a valid virtual hostable name.") + } + } + } + if !(params.Bucket != nil) { + if exprVal := params.UseS3ExpressControlEndpoint; exprVal != nil { + _UseS3ExpressControlEndpoint := *exprVal + _ = _UseS3ExpressControlEndpoint + if _UseS3ExpressControlEndpoint == true { + if exprVal := params.Endpoint; exprVal != nil { + _Endpoint := *exprVal + _ = _Endpoint + if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil { + _url := *exprVal + _ = _url + uriString := func() string { + var out strings.Builder + out.WriteString(_url.Scheme) + out.WriteString("://") + out.WriteString(_url.Authority) + out.WriteString(_url.Path) + return out.String() + }() - if p.UseDualStack == nil { - p.UseDualStack = ptr.Bool(false) - } + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } - if p.UseFIPS == nil { - p.UseFIPS = ptr.Bool(false) - } + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) - if p.UseGlobalEndpoint == nil { - p.UseGlobalEndpoint = ptr.Bool(false) - } - return p -} + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") -// EndpointResolverV2 provides the interface for resolving service endpoints. -type EndpointResolverV2 interface { - // ResolveEndpoint attempts to resolve the endpoint with the provided options, - // returning the endpoint if found. Otherwise an error is returned. - ResolveEndpoint(ctx context.Context, params EndpointParameters) ( - smithyendpoints.Endpoint, error, - ) -} + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + if _UseFIPS == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://s3express-control-fips.") + out.WriteString(_Region) + out.WriteString(".amazonaws.com") + return out.String() + }() -// resolver provides the implementation for resolving endpoints. 
-type resolver struct{} + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } -func NewDefaultEndpointResolverV2() EndpointResolverV2 { - return &resolver{} -} + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) -// ResolveEndpoint attempts to resolve the endpoint with the provided options, -// returning the endpoint if found. Otherwise an error is returned. -func (r *resolver) ResolveEndpoint( - ctx context.Context, params EndpointParameters, -) ( - endpoint smithyendpoints.Endpoint, err error, -) { - params = params.WithDefaults() - if err = params.ValidateRequired(); err != nil { - return endpoint, fmt.Errorf("endpoint parameters are not valid, %w", err) - } - _UseFIPS := *params.UseFIPS - _UseDualStack := *params.UseDualStack - _ForcePathStyle := *params.ForcePathStyle - _Accelerate := *params.Accelerate - _UseGlobalEndpoint := *params.UseGlobalEndpoint - _DisableMultiRegionAccessPoints := *params.DisableMultiRegionAccessPoints + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") - if exprVal := params.Region; exprVal != nil { - _Region := *exprVal - _ = _Region - if _Accelerate == true { - if _UseFIPS == true { - return endpoint, fmt.Errorf("endpoint rule error, %s", "Accelerate cannot be used with FIPS") - } - } - if _UseDualStack == true { - if exprVal := params.Endpoint; exprVal != nil { - _Endpoint := *exprVal - _ = _Endpoint - return endpoint, fmt.Errorf("endpoint rule error, %s", "Cannot set dual-stack in combination with a custom endpoint.") - } - } - if exprVal := params.Endpoint; exprVal != nil { - _Endpoint := *exprVal - _ = _Endpoint - if _UseFIPS == true { - return endpoint, fmt.Errorf("endpoint rule error, %s", "A custom endpoint cannot be combined with FIPS") - } - } - if exprVal := params.Endpoint; exprVal != nil { - _Endpoint := *exprVal - _ = _Endpoint - if _Accelerate == true { - return endpoint, fmt.Errorf("endpoint rule error, %s", "A custom endpoint cannot be combined with S3 Accelerate") - } - } - if _UseFIPS == true { - if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil { - _partitionResult := *exprVal - _ = _partitionResult - if _partitionResult.Name == "aws-cn" { - return endpoint, fmt.Errorf("endpoint rule error, %s", "Partition does not support FIPS") + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://s3express-control.") + out.WriteString(_Region) + out.WriteString(".amazonaws.com") + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + 
smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil } } } @@ -2853,8 +3677,8 @@ func (r *resolver) ResolveEndpoint( return endpoint, fmt.Errorf("endpoint rule error, %s", "S3 Outposts does not support S3 Accelerate") } if exprVal := _bucketArn.ResourceId.Get(4); exprVal != nil { - _var_244 := *exprVal - _ = _var_244 + _var_275 := *exprVal + _ = _var_275 return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Arn: Outpost Access Point ARN contains sub resources") } if exprVal := _bucketArn.ResourceId.Get(1); exprVal != nil { @@ -3071,8 +3895,8 @@ func (r *resolver) ResolveEndpoint( } if _ForcePathStyle == true { if exprVal := awsrulesfn.ParseARN(_Bucket); exprVal != nil { - _var_257 := *exprVal - _ = _var_257 + _var_288 := *exprVal + _ = _var_288 return endpoint, fmt.Errorf("endpoint rule error, %s", "Path-style addressing cannot be used with ARN buckets") } } @@ -4853,6 +5677,8 @@ func bindEndpointParams(input interface{}, options Options) *EndpointParameters params.DisableMultiRegionAccessPoints = aws.Bool(options.DisableMultiRegionAccessPoints) params.UseArnRegion = aws.Bool(options.UseARNRegion) + params.DisableS3ExpressSessionAuth = options.DisableS3ExpressSessionAuth + if b, ok := input.(endpointParamsBinder); ok { b.bindEndpointParams(params) } @@ -4911,5 +5737,8 @@ func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in mid rscheme.SignerProperties.SetAll(&o.SignerProperties) } + backend := s3cust.GetPropertiesBackend(&endpt.Properties) + ctx = internalcontext.SetS3Backend(ctx, backend) + return next.HandleFinalize(ctx, in) } diff --git a/service/s3/endpoints_test.go b/service/s3/endpoints_test.go index 4f32f617279..3171d4131c1 100644 --- a/service/s3/endpoints_test.go +++ b/service/s3/endpoints_test.go @@ -12214,3 +12214,1212 @@ func TestEndpointCase272(t *testing.T) { t.Errorf("expect properties to match\n%s", diff) } } + +// Data Plane with short AZ +func TestEndpointCase273(t *testing.T) { + var params = EndpointParameters{ + Region: ptr.String("us-east-1"), + Bucket: ptr.String("mybucket--use1-az1--x-s3"), + UseFIPS: ptr.Bool(false), + UseDualStack: ptr.Bool(false), + Accelerate: ptr.Bool(false), + UseS3ExpressControlEndpoint: ptr.Bool(false), + } + + resolver := NewDefaultEndpointResolverV2() + result, err := resolver.ResolveEndpoint(context.Background(), params) + _, _ = result, err + + if err != nil { + t.Fatalf("expect no error, got %v", err) + } + + uri, _ := url.Parse("https://mybucket--use1-az1--x-s3.s3express-use1-az1.us-east-1.amazonaws.com") + + expectEndpoint := smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "sigv4-s3express", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") + + smithyhttp.SetDisableDoubleEncoding(&sp, true) + return sp + }(), + }, + }) + out.Set("backend", "S3Express") + return out + }(), + } + + if e, a := expectEndpoint.URI, result.URI; e != a { + t.Errorf("expect %v URI, got %v", e, a) + } + + if diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != "" { + t.Errorf("expect headers to match\n%s", diff) + } + + if 
diff := cmp.Diff(expectEndpoint.Properties, result.Properties, + cmp.AllowUnexported(smithy.Properties{}), + ); diff != "" { + t.Errorf("expect properties to match\n%s", diff) + } +} + +// Data Plane with short AZ fips +func TestEndpointCase274(t *testing.T) { + var params = EndpointParameters{ + Region: ptr.String("us-east-1"), + Bucket: ptr.String("mybucket--use1-az1--x-s3"), + UseFIPS: ptr.Bool(true), + UseDualStack: ptr.Bool(false), + Accelerate: ptr.Bool(false), + UseS3ExpressControlEndpoint: ptr.Bool(false), + } + + resolver := NewDefaultEndpointResolverV2() + result, err := resolver.ResolveEndpoint(context.Background(), params) + _, _ = result, err + + if err != nil { + t.Fatalf("expect no error, got %v", err) + } + + uri, _ := url.Parse("https://mybucket--use1-az1--x-s3.s3express-fips-use1-az1.us-east-1.amazonaws.com") + + expectEndpoint := smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "sigv4-s3express", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") + + smithyhttp.SetDisableDoubleEncoding(&sp, true) + return sp + }(), + }, + }) + out.Set("backend", "S3Express") + return out + }(), + } + + if e, a := expectEndpoint.URI, result.URI; e != a { + t.Errorf("expect %v URI, got %v", e, a) + } + + if diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != "" { + t.Errorf("expect headers to match\n%s", diff) + } + + if diff := cmp.Diff(expectEndpoint.Properties, result.Properties, + cmp.AllowUnexported(smithy.Properties{}), + ); diff != "" { + t.Errorf("expect properties to match\n%s", diff) + } +} + +// Data Plane with long AZ +func TestEndpointCase275(t *testing.T) { + var params = EndpointParameters{ + Region: ptr.String("ap-northeast-1"), + Bucket: ptr.String("mybucket--apne1-az1--x-s3"), + UseFIPS: ptr.Bool(false), + UseDualStack: ptr.Bool(false), + Accelerate: ptr.Bool(false), + UseS3ExpressControlEndpoint: ptr.Bool(false), + } + + resolver := NewDefaultEndpointResolverV2() + result, err := resolver.ResolveEndpoint(context.Background(), params) + _, _ = result, err + + if err != nil { + t.Fatalf("expect no error, got %v", err) + } + + uri, _ := url.Parse("https://mybucket--apne1-az1--x-s3.s3express-apne1-az1.ap-northeast-1.amazonaws.com") + + expectEndpoint := smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "sigv4-s3express", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, "ap-northeast-1") + + smithyhttp.SetDisableDoubleEncoding(&sp, true) + return sp + }(), + }, + }) + out.Set("backend", "S3Express") + return out + }(), + } + + if e, a := expectEndpoint.URI, result.URI; e != a { + t.Errorf("expect %v URI, got %v", e, a) + } + + if diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != "" { + t.Errorf("expect headers to match\n%s", diff) + } + + if diff := cmp.Diff(expectEndpoint.Properties, result.Properties, + cmp.AllowUnexported(smithy.Properties{}), + ); diff != "" { + t.Errorf("expect properties 
to match\n%s", diff) + } +} + +// Data Plane with long AZ fips +func TestEndpointCase276(t *testing.T) { + var params = EndpointParameters{ + Region: ptr.String("ap-northeast-1"), + Bucket: ptr.String("mybucket--apne1-az1--x-s3"), + UseFIPS: ptr.Bool(true), + UseDualStack: ptr.Bool(false), + Accelerate: ptr.Bool(false), + UseS3ExpressControlEndpoint: ptr.Bool(false), + } + + resolver := NewDefaultEndpointResolverV2() + result, err := resolver.ResolveEndpoint(context.Background(), params) + _, _ = result, err + + if err != nil { + t.Fatalf("expect no error, got %v", err) + } + + uri, _ := url.Parse("https://mybucket--apne1-az1--x-s3.s3express-fips-apne1-az1.ap-northeast-1.amazonaws.com") + + expectEndpoint := smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "sigv4-s3express", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, "ap-northeast-1") + + smithyhttp.SetDisableDoubleEncoding(&sp, true) + return sp + }(), + }, + }) + out.Set("backend", "S3Express") + return out + }(), + } + + if e, a := expectEndpoint.URI, result.URI; e != a { + t.Errorf("expect %v URI, got %v", e, a) + } + + if diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != "" { + t.Errorf("expect headers to match\n%s", diff) + } + + if diff := cmp.Diff(expectEndpoint.Properties, result.Properties, + cmp.AllowUnexported(smithy.Properties{}), + ); diff != "" { + t.Errorf("expect properties to match\n%s", diff) + } +} + +// Control plane with short AZ bucket +func TestEndpointCase277(t *testing.T) { + var params = EndpointParameters{ + Region: ptr.String("us-east-1"), + Bucket: ptr.String("mybucket--use1-az1--x-s3"), + UseFIPS: ptr.Bool(false), + UseDualStack: ptr.Bool(false), + Accelerate: ptr.Bool(false), + UseS3ExpressControlEndpoint: ptr.Bool(true), + DisableS3ExpressSessionAuth: ptr.Bool(false), + } + + resolver := NewDefaultEndpointResolverV2() + result, err := resolver.ResolveEndpoint(context.Background(), params) + _, _ = result, err + + if err != nil { + t.Fatalf("expect no error, got %v", err) + } + + uri, _ := url.Parse("https://s3express-control.us-east-1.amazonaws.com/mybucket--use1-az1--x-s3") + + expectEndpoint := smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") + + smithyhttp.SetDisableDoubleEncoding(&sp, true) + return sp + }(), + }, + }) + out.Set("backend", "S3Express") + return out + }(), + } + + if e, a := expectEndpoint.URI, result.URI; e != a { + t.Errorf("expect %v URI, got %v", e, a) + } + + if diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != "" { + t.Errorf("expect headers to match\n%s", diff) + } + + if diff := cmp.Diff(expectEndpoint.Properties, result.Properties, + cmp.AllowUnexported(smithy.Properties{}), + ); diff != "" { + t.Errorf("expect properties to match\n%s", diff) + } +} + +// Control plane with short AZ bucket and fips +func 
TestEndpointCase278(t *testing.T) { + var params = EndpointParameters{ + Region: ptr.String("us-east-1"), + Bucket: ptr.String("mybucket--use1-az1--x-s3"), + UseFIPS: ptr.Bool(true), + UseDualStack: ptr.Bool(false), + Accelerate: ptr.Bool(false), + UseS3ExpressControlEndpoint: ptr.Bool(true), + DisableS3ExpressSessionAuth: ptr.Bool(false), + } + + resolver := NewDefaultEndpointResolverV2() + result, err := resolver.ResolveEndpoint(context.Background(), params) + _, _ = result, err + + if err != nil { + t.Fatalf("expect no error, got %v", err) + } + + uri, _ := url.Parse("https://s3express-control-fips.us-east-1.amazonaws.com/mybucket--use1-az1--x-s3") + + expectEndpoint := smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") + + smithyhttp.SetDisableDoubleEncoding(&sp, true) + return sp + }(), + }, + }) + out.Set("backend", "S3Express") + return out + }(), + } + + if e, a := expectEndpoint.URI, result.URI; e != a { + t.Errorf("expect %v URI, got %v", e, a) + } + + if diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != "" { + t.Errorf("expect headers to match\n%s", diff) + } + + if diff := cmp.Diff(expectEndpoint.Properties, result.Properties, + cmp.AllowUnexported(smithy.Properties{}), + ); diff != "" { + t.Errorf("expect properties to match\n%s", diff) + } +} + +// Control plane without bucket +func TestEndpointCase279(t *testing.T) { + var params = EndpointParameters{ + Region: ptr.String("us-east-1"), + UseFIPS: ptr.Bool(false), + UseDualStack: ptr.Bool(false), + Accelerate: ptr.Bool(false), + UseS3ExpressControlEndpoint: ptr.Bool(true), + DisableS3ExpressSessionAuth: ptr.Bool(false), + } + + resolver := NewDefaultEndpointResolverV2() + result, err := resolver.ResolveEndpoint(context.Background(), params) + _, _ = result, err + + if err != nil { + t.Fatalf("expect no error, got %v", err) + } + + uri, _ := url.Parse("https://s3express-control.us-east-1.amazonaws.com") + + expectEndpoint := smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") + + smithyhttp.SetDisableDoubleEncoding(&sp, true) + return sp + }(), + }, + }) + out.Set("backend", "S3Express") + return out + }(), + } + + if e, a := expectEndpoint.URI, result.URI; e != a { + t.Errorf("expect %v URI, got %v", e, a) + } + + if diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != "" { + t.Errorf("expect headers to match\n%s", diff) + } + + if diff := cmp.Diff(expectEndpoint.Properties, result.Properties, + cmp.AllowUnexported(smithy.Properties{}), + ); diff != "" { + t.Errorf("expect properties to match\n%s", diff) + } +} + +// Control plane without bucket and fips +func TestEndpointCase280(t *testing.T) { + var params = EndpointParameters{ + Region: ptr.String("us-east-1"), + UseFIPS: ptr.Bool(true), + 
UseDualStack: ptr.Bool(false), + Accelerate: ptr.Bool(false), + UseS3ExpressControlEndpoint: ptr.Bool(true), + DisableS3ExpressSessionAuth: ptr.Bool(false), + } + + resolver := NewDefaultEndpointResolverV2() + result, err := resolver.ResolveEndpoint(context.Background(), params) + _, _ = result, err + + if err != nil { + t.Fatalf("expect no error, got %v", err) + } + + uri, _ := url.Parse("https://s3express-control-fips.us-east-1.amazonaws.com") + + expectEndpoint := smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") + + smithyhttp.SetDisableDoubleEncoding(&sp, true) + return sp + }(), + }, + }) + out.Set("backend", "S3Express") + return out + }(), + } + + if e, a := expectEndpoint.URI, result.URI; e != a { + t.Errorf("expect %v URI, got %v", e, a) + } + + if diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != "" { + t.Errorf("expect headers to match\n%s", diff) + } + + if diff := cmp.Diff(expectEndpoint.Properties, result.Properties, + cmp.AllowUnexported(smithy.Properties{}), + ); diff != "" { + t.Errorf("expect properties to match\n%s", diff) + } +} + +// Data Plane sigv4 auth with short AZ +func TestEndpointCase281(t *testing.T) { + var params = EndpointParameters{ + Region: ptr.String("us-west-2"), + Bucket: ptr.String("mybucket--usw2-az1--x-s3"), + UseFIPS: ptr.Bool(false), + UseDualStack: ptr.Bool(false), + Accelerate: ptr.Bool(false), + DisableS3ExpressSessionAuth: ptr.Bool(true), + } + + resolver := NewDefaultEndpointResolverV2() + result, err := resolver.ResolveEndpoint(context.Background(), params) + _, _ = result, err + + if err != nil { + t.Fatalf("expect no error, got %v", err) + } + + uri, _ := url.Parse("https://mybucket--usw2-az1--x-s3.s3express-usw2-az1.us-west-2.amazonaws.com") + + expectEndpoint := smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, "us-west-2") + + smithyhttp.SetDisableDoubleEncoding(&sp, true) + return sp + }(), + }, + }) + out.Set("backend", "S3Express") + return out + }(), + } + + if e, a := expectEndpoint.URI, result.URI; e != a { + t.Errorf("expect %v URI, got %v", e, a) + } + + if diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != "" { + t.Errorf("expect headers to match\n%s", diff) + } + + if diff := cmp.Diff(expectEndpoint.Properties, result.Properties, + cmp.AllowUnexported(smithy.Properties{}), + ); diff != "" { + t.Errorf("expect properties to match\n%s", diff) + } +} + +// Data Plane sigv4 auth with short AZ fips +func TestEndpointCase282(t *testing.T) { + var params = EndpointParameters{ + Region: ptr.String("us-west-2"), + Bucket: ptr.String("mybucket--usw2-az1--x-s3"), + UseFIPS: ptr.Bool(true), + UseDualStack: ptr.Bool(false), + Accelerate: ptr.Bool(false), + DisableS3ExpressSessionAuth: ptr.Bool(true), + } + + resolver := 
NewDefaultEndpointResolverV2() + result, err := resolver.ResolveEndpoint(context.Background(), params) + _, _ = result, err + + if err != nil { + t.Fatalf("expect no error, got %v", err) + } + + uri, _ := url.Parse("https://mybucket--usw2-az1--x-s3.s3express-fips-usw2-az1.us-west-2.amazonaws.com") + + expectEndpoint := smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, "us-west-2") + + smithyhttp.SetDisableDoubleEncoding(&sp, true) + return sp + }(), + }, + }) + out.Set("backend", "S3Express") + return out + }(), + } + + if e, a := expectEndpoint.URI, result.URI; e != a { + t.Errorf("expect %v URI, got %v", e, a) + } + + if diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != "" { + t.Errorf("expect headers to match\n%s", diff) + } + + if diff := cmp.Diff(expectEndpoint.Properties, result.Properties, + cmp.AllowUnexported(smithy.Properties{}), + ); diff != "" { + t.Errorf("expect properties to match\n%s", diff) + } +} + +// Data Plane sigv4 auth with long AZ +func TestEndpointCase283(t *testing.T) { + var params = EndpointParameters{ + Region: ptr.String("ap-northeast-1"), + Bucket: ptr.String("mybucket--apne1-az1--x-s3"), + UseFIPS: ptr.Bool(false), + UseDualStack: ptr.Bool(false), + Accelerate: ptr.Bool(false), + UseS3ExpressControlEndpoint: ptr.Bool(false), + DisableS3ExpressSessionAuth: ptr.Bool(true), + } + + resolver := NewDefaultEndpointResolverV2() + result, err := resolver.ResolveEndpoint(context.Background(), params) + _, _ = result, err + + if err != nil { + t.Fatalf("expect no error, got %v", err) + } + + uri, _ := url.Parse("https://mybucket--apne1-az1--x-s3.s3express-apne1-az1.ap-northeast-1.amazonaws.com") + + expectEndpoint := smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, "ap-northeast-1") + + smithyhttp.SetDisableDoubleEncoding(&sp, true) + return sp + }(), + }, + }) + out.Set("backend", "S3Express") + return out + }(), + } + + if e, a := expectEndpoint.URI, result.URI; e != a { + t.Errorf("expect %v URI, got %v", e, a) + } + + if diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != "" { + t.Errorf("expect headers to match\n%s", diff) + } + + if diff := cmp.Diff(expectEndpoint.Properties, result.Properties, + cmp.AllowUnexported(smithy.Properties{}), + ); diff != "" { + t.Errorf("expect properties to match\n%s", diff) + } +} + +// Data Plane sigv4 auth with long AZ fips +func TestEndpointCase284(t *testing.T) { + var params = EndpointParameters{ + Region: ptr.String("ap-northeast-1"), + Bucket: ptr.String("mybucket--apne1-az1--x-s3"), + UseFIPS: ptr.Bool(true), + UseDualStack: ptr.Bool(false), + Accelerate: ptr.Bool(false), + UseS3ExpressControlEndpoint: ptr.Bool(false), + DisableS3ExpressSessionAuth: ptr.Bool(true), + } + + resolver := NewDefaultEndpointResolverV2() + 
result, err := resolver.ResolveEndpoint(context.Background(), params) + _, _ = result, err + + if err != nil { + t.Fatalf("expect no error, got %v", err) + } + + uri, _ := url.Parse("https://mybucket--apne1-az1--x-s3.s3express-fips-apne1-az1.ap-northeast-1.amazonaws.com") + + expectEndpoint := smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, "ap-northeast-1") + + smithyhttp.SetDisableDoubleEncoding(&sp, true) + return sp + }(), + }, + }) + out.Set("backend", "S3Express") + return out + }(), + } + + if e, a := expectEndpoint.URI, result.URI; e != a { + t.Errorf("expect %v URI, got %v", e, a) + } + + if diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != "" { + t.Errorf("expect headers to match\n%s", diff) + } + + if diff := cmp.Diff(expectEndpoint.Properties, result.Properties, + cmp.AllowUnexported(smithy.Properties{}), + ); diff != "" { + t.Errorf("expect properties to match\n%s", diff) + } +} + +// Control Plane host override +func TestEndpointCase285(t *testing.T) { + var params = EndpointParameters{ + Region: ptr.String("us-west-2"), + Bucket: ptr.String("mybucket--usw2-az1--x-s3"), + UseFIPS: ptr.Bool(false), + UseDualStack: ptr.Bool(false), + Accelerate: ptr.Bool(false), + UseS3ExpressControlEndpoint: ptr.Bool(true), + DisableS3ExpressSessionAuth: ptr.Bool(true), + Endpoint: ptr.String("https://custom.com"), + } + + resolver := NewDefaultEndpointResolverV2() + result, err := resolver.ResolveEndpoint(context.Background(), params) + _, _ = result, err + + if err != nil { + t.Fatalf("expect no error, got %v", err) + } + + uri, _ := url.Parse("https://mybucket--usw2-az1--x-s3.custom.com") + + expectEndpoint := smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, "us-west-2") + + smithyhttp.SetDisableDoubleEncoding(&sp, true) + return sp + }(), + }, + }) + out.Set("backend", "S3Express") + return out + }(), + } + + if e, a := expectEndpoint.URI, result.URI; e != a { + t.Errorf("expect %v URI, got %v", e, a) + } + + if diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != "" { + t.Errorf("expect headers to match\n%s", diff) + } + + if diff := cmp.Diff(expectEndpoint.Properties, result.Properties, + cmp.AllowUnexported(smithy.Properties{}), + ); diff != "" { + t.Errorf("expect properties to match\n%s", diff) + } +} + +// Control Plane host override no bucket +func TestEndpointCase286(t *testing.T) { + var params = EndpointParameters{ + Region: ptr.String("us-west-2"), + UseFIPS: ptr.Bool(false), + UseDualStack: ptr.Bool(false), + Accelerate: ptr.Bool(false), + UseS3ExpressControlEndpoint: ptr.Bool(true), + DisableS3ExpressSessionAuth: ptr.Bool(true), + Endpoint: ptr.String("https://custom.com"), + } + + resolver := NewDefaultEndpointResolverV2() + result, err := 
resolver.ResolveEndpoint(context.Background(), params) + _, _ = result, err + + if err != nil { + t.Fatalf("expect no error, got %v", err) + } + + uri, _ := url.Parse("https://custom.com") + + expectEndpoint := smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, "us-west-2") + + smithyhttp.SetDisableDoubleEncoding(&sp, true) + return sp + }(), + }, + }) + out.Set("backend", "S3Express") + return out + }(), + } + + if e, a := expectEndpoint.URI, result.URI; e != a { + t.Errorf("expect %v URI, got %v", e, a) + } + + if diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != "" { + t.Errorf("expect headers to match\n%s", diff) + } + + if diff := cmp.Diff(expectEndpoint.Properties, result.Properties, + cmp.AllowUnexported(smithy.Properties{}), + ); diff != "" { + t.Errorf("expect properties to match\n%s", diff) + } +} + +// Data plane host override non virtual session auth +func TestEndpointCase287(t *testing.T) { + var params = EndpointParameters{ + Region: ptr.String("us-west-2"), + Bucket: ptr.String("mybucket--usw2-az1--x-s3"), + UseFIPS: ptr.Bool(false), + UseDualStack: ptr.Bool(false), + Accelerate: ptr.Bool(false), + Endpoint: ptr.String("https://10.0.0.1"), + } + + resolver := NewDefaultEndpointResolverV2() + result, err := resolver.ResolveEndpoint(context.Background(), params) + _, _ = result, err + + if err != nil { + t.Fatalf("expect no error, got %v", err) + } + + uri, _ := url.Parse("https://10.0.0.1/mybucket--usw2-az1--x-s3") + + expectEndpoint := smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "sigv4-s3express", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, "us-west-2") + + smithyhttp.SetDisableDoubleEncoding(&sp, true) + return sp + }(), + }, + }) + out.Set("backend", "S3Express") + return out + }(), + } + + if e, a := expectEndpoint.URI, result.URI; e != a { + t.Errorf("expect %v URI, got %v", e, a) + } + + if diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != "" { + t.Errorf("expect headers to match\n%s", diff) + } + + if diff := cmp.Diff(expectEndpoint.Properties, result.Properties, + cmp.AllowUnexported(smithy.Properties{}), + ); diff != "" { + t.Errorf("expect properties to match\n%s", diff) + } +} + +// Control Plane host override ip +func TestEndpointCase288(t *testing.T) { + var params = EndpointParameters{ + Region: ptr.String("us-west-2"), + Bucket: ptr.String("mybucket--usw2-az1--x-s3"), + UseFIPS: ptr.Bool(false), + UseDualStack: ptr.Bool(false), + Accelerate: ptr.Bool(false), + UseS3ExpressControlEndpoint: ptr.Bool(true), + DisableS3ExpressSessionAuth: ptr.Bool(true), + Endpoint: ptr.String("https://10.0.0.1"), + } + + resolver := NewDefaultEndpointResolverV2() + result, err := resolver.ResolveEndpoint(context.Background(), params) + _, _ = result, err + + if err != nil { + t.Fatalf("expect no error, got %v", err) + } + + uri, _ := 
url.Parse("https://10.0.0.1/mybucket--usw2-az1--x-s3") + + expectEndpoint := smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, "us-west-2") + + smithyhttp.SetDisableDoubleEncoding(&sp, true) + return sp + }(), + }, + }) + out.Set("backend", "S3Express") + return out + }(), + } + + if e, a := expectEndpoint.URI, result.URI; e != a { + t.Errorf("expect %v URI, got %v", e, a) + } + + if diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != "" { + t.Errorf("expect headers to match\n%s", diff) + } + + if diff := cmp.Diff(expectEndpoint.Properties, result.Properties, + cmp.AllowUnexported(smithy.Properties{}), + ); diff != "" { + t.Errorf("expect properties to match\n%s", diff) + } +} + +// Data plane host override +func TestEndpointCase289(t *testing.T) { + var params = EndpointParameters{ + Region: ptr.String("us-west-2"), + Bucket: ptr.String("mybucket--usw2-az1--x-s3"), + UseFIPS: ptr.Bool(false), + UseDualStack: ptr.Bool(false), + Accelerate: ptr.Bool(false), + Endpoint: ptr.String("https://custom.com"), + } + + resolver := NewDefaultEndpointResolverV2() + result, err := resolver.ResolveEndpoint(context.Background(), params) + _, _ = result, err + + if err != nil { + t.Fatalf("expect no error, got %v", err) + } + + uri, _ := url.Parse("https://mybucket--usw2-az1--x-s3.custom.com") + + expectEndpoint := smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "sigv4-s3express", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, "us-west-2") + + smithyhttp.SetDisableDoubleEncoding(&sp, true) + return sp + }(), + }, + }) + out.Set("backend", "S3Express") + return out + }(), + } + + if e, a := expectEndpoint.URI, result.URI; e != a { + t.Errorf("expect %v URI, got %v", e, a) + } + + if diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != "" { + t.Errorf("expect headers to match\n%s", diff) + } + + if diff := cmp.Diff(expectEndpoint.Properties, result.Properties, + cmp.AllowUnexported(smithy.Properties{}), + ); diff != "" { + t.Errorf("expect properties to match\n%s", diff) + } +} + +// bad format error +func TestEndpointCase290(t *testing.T) { + var params = EndpointParameters{ + Region: ptr.String("us-east-1"), + Bucket: ptr.String("mybucket--usaz1--x-s3"), + UseFIPS: ptr.Bool(false), + UseDualStack: ptr.Bool(false), + Accelerate: ptr.Bool(false), + UseS3ExpressControlEndpoint: ptr.Bool(false), + } + + resolver := NewDefaultEndpointResolverV2() + result, err := resolver.ResolveEndpoint(context.Background(), params) + _, _ = result, err + + if err == nil { + t.Fatalf("expect error, got none") + } + if e, a := "Unrecognized S3Express bucket name format.", err.Error(); !strings.Contains(a, e) { + t.Errorf("expect %v error in %v", e, a) + } +} + +// bad format error no session auth +func TestEndpointCase291(t *testing.T) { + var params = EndpointParameters{ + Region: 
ptr.String("us-east-1"), + Bucket: ptr.String("mybucket--usaz1--x-s3"), + UseFIPS: ptr.Bool(false), + UseDualStack: ptr.Bool(false), + Accelerate: ptr.Bool(false), + UseS3ExpressControlEndpoint: ptr.Bool(false), + DisableS3ExpressSessionAuth: ptr.Bool(true), + } + + resolver := NewDefaultEndpointResolverV2() + result, err := resolver.ResolveEndpoint(context.Background(), params) + _, _ = result, err + + if err == nil { + t.Fatalf("expect error, got none") + } + if e, a := "Unrecognized S3Express bucket name format.", err.Error(); !strings.Contains(a, e) { + t.Errorf("expect %v error in %v", e, a) + } +} + +// dual-stack error +func TestEndpointCase292(t *testing.T) { + var params = EndpointParameters{ + Region: ptr.String("us-east-1"), + Bucket: ptr.String("mybucket--use1-az1--x-s3"), + UseFIPS: ptr.Bool(false), + UseDualStack: ptr.Bool(true), + Accelerate: ptr.Bool(false), + UseS3ExpressControlEndpoint: ptr.Bool(false), + } + + resolver := NewDefaultEndpointResolverV2() + result, err := resolver.ResolveEndpoint(context.Background(), params) + _, _ = result, err + + if err == nil { + t.Fatalf("expect error, got none") + } + if e, a := "S3Express does not support Dual-stack.", err.Error(); !strings.Contains(a, e) { + t.Errorf("expect %v error in %v", e, a) + } +} + +// accelerate error +func TestEndpointCase293(t *testing.T) { + var params = EndpointParameters{ + Region: ptr.String("us-east-1"), + Bucket: ptr.String("mybucket--use1-az1--x-s3"), + UseFIPS: ptr.Bool(false), + UseDualStack: ptr.Bool(false), + Accelerate: ptr.Bool(true), + UseS3ExpressControlEndpoint: ptr.Bool(false), + } + + resolver := NewDefaultEndpointResolverV2() + result, err := resolver.ResolveEndpoint(context.Background(), params) + _, _ = result, err + + if err == nil { + t.Fatalf("expect error, got none") + } + if e, a := "S3Express does not support S3 Accelerate.", err.Error(); !strings.Contains(a, e) { + t.Errorf("expect %v error in %v", e, a) + } +} + +// Data plane bucket format error +func TestEndpointCase294(t *testing.T) { + var params = EndpointParameters{ + Region: ptr.String("us-east-1"), + Bucket: ptr.String("my.bucket--use1-az1--x-s3"), + UseFIPS: ptr.Bool(false), + UseDualStack: ptr.Bool(false), + Accelerate: ptr.Bool(false), + UseS3ExpressControlEndpoint: ptr.Bool(false), + } + + resolver := NewDefaultEndpointResolverV2() + result, err := resolver.ResolveEndpoint(context.Background(), params) + _, _ = result, err + + if err == nil { + t.Fatalf("expect error, got none") + } + if e, a := "S3Express bucket name is not a valid virtual hostable name.", err.Error(); !strings.Contains(a, e) { + t.Errorf("expect %v error in %v", e, a) + } +} + +// host override data plane bucket error session auth +func TestEndpointCase295(t *testing.T) { + var params = EndpointParameters{ + Region: ptr.String("us-west-2"), + Bucket: ptr.String("my.bucket--usw2-az1--x-s3"), + UseFIPS: ptr.Bool(false), + UseDualStack: ptr.Bool(false), + Accelerate: ptr.Bool(false), + Endpoint: ptr.String("https://custom.com"), + } + + resolver := NewDefaultEndpointResolverV2() + result, err := resolver.ResolveEndpoint(context.Background(), params) + _, _ = result, err + + if err == nil { + t.Fatalf("expect error, got none") + } + if e, a := "S3Express bucket name is not a valid virtual hostable name.", err.Error(); !strings.Contains(a, e) { + t.Errorf("expect %v error in %v", e, a) + } +} + +// host override data plane bucket error +func TestEndpointCase296(t *testing.T) { + var params = EndpointParameters{ + Region: ptr.String("us-west-2"), + 
Bucket: ptr.String("my.bucket--usw2-az1--x-s3"), + UseFIPS: ptr.Bool(false), + UseDualStack: ptr.Bool(false), + Accelerate: ptr.Bool(false), + Endpoint: ptr.String("https://custom.com"), + DisableS3ExpressSessionAuth: ptr.Bool(true), + } + + resolver := NewDefaultEndpointResolverV2() + result, err := resolver.ResolveEndpoint(context.Background(), params) + _, _ = result, err + + if err == nil { + t.Fatalf("expect error, got none") + } + if e, a := "S3Express bucket name is not a valid virtual hostable name.", err.Error(); !strings.Contains(a, e) { + t.Errorf("expect %v error in %v", e, a) + } +} diff --git a/service/s3/express.go b/service/s3/express.go new file mode 100644 index 00000000000..bbac9ca270e --- /dev/null +++ b/service/s3/express.go @@ -0,0 +1,9 @@ +package s3 + +import ( + "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" +) + +// ExpressCredentialsProvider retrieves credentials for operations against the +// S3Express storage class. +type ExpressCredentialsProvider = customizations.S3ExpressCredentialsProvider diff --git a/service/s3/express_default.go b/service/s3/express_default.go new file mode 100644 index 00000000000..3a55e26bc93 --- /dev/null +++ b/service/s3/express_default.go @@ -0,0 +1,123 @@ +package s3 + +import ( + "context" + "errors" + "sync" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/sdk" + "github.com/aws/aws-sdk-go-v2/internal/sync/singleflight" + "github.com/aws/smithy-go/container/private/cache" + "github.com/aws/smithy-go/container/private/cache/lru" +) + +const s3ExpressCacheCap = 100 + +const s3ExpressRefreshWindow = 1 * time.Minute + +// The default S3Express provider uses an LRU cache with a capacity of 100. +// +// Credentials will be refreshed asynchronously when a Retrieve() call is made +// for cached credentials within an expiry window (1 minute, currently +// non-configurable). 
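+// Retrieve returns cached credentials for a bucket when a valid entry
+// exists. Missing or expired entries block on a CreateSession call that is
+// deduplicated per bucket via singleflight; entries inside the refresh
+// window are returned immediately while a background refresh is started.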
+type defaultS3ExpressCredentialsProvider struct { + mu sync.Mutex + sf singleflight.Group + + client createSessionAPIClient + credsCache cache.Cache + refreshWindow time.Duration +} + +type createSessionAPIClient interface { + CreateSession(context.Context, *CreateSessionInput, ...func(*Options)) (*CreateSessionOutput, error) +} + +func newDefaultS3ExpressCredentialsProvider() *defaultS3ExpressCredentialsProvider { + return &defaultS3ExpressCredentialsProvider{ + credsCache: lru.New(s3ExpressCacheCap), + refreshWindow: s3ExpressRefreshWindow, + } +} + +func (p *defaultS3ExpressCredentialsProvider) Retrieve(ctx context.Context, bucket string) (aws.Credentials, error) { + p.mu.Lock() + defer p.mu.Unlock() + + creds, ok := p.getCacheCredentials(bucket) + if !ok || creds.Expired() { + return p.awaitDoChanRetrieve(ctx, bucket) + } + + if creds.Expires.Sub(sdk.NowTime()) <= p.refreshWindow { + p.doChanRetrieve(ctx, bucket) + } + + return *creds, nil +} + +func (p *defaultS3ExpressCredentialsProvider) doChanRetrieve(ctx context.Context, bucket string) <-chan singleflight.Result { + return p.sf.DoChan(bucket, func() (interface{}, error) { + return p.retrieve(ctx, bucket) + }) +} + +func (p *defaultS3ExpressCredentialsProvider) awaitDoChanRetrieve(ctx context.Context, bucket string) (aws.Credentials, error) { + ch := p.doChanRetrieve(ctx, bucket) + + select { + case r := <-ch: + return r.Val.(aws.Credentials), r.Err + case <-ctx.Done(): + return aws.Credentials{}, errors.New("s3express retrieve credentials canceled") + } +} + +func (p *defaultS3ExpressCredentialsProvider) retrieve(ctx context.Context, bucket string) (aws.Credentials, error) { + resp, err := p.client.CreateSession(ctx, &CreateSessionInput{ + Bucket: aws.String(bucket), + }) + if err != nil { + return aws.Credentials{}, err + } + + creds, err := credentialsFromResponse(resp) + if err != nil { + return aws.Credentials{}, err + } + + p.putCacheCredentials(bucket, creds) + return *creds, nil +} + +func (p *defaultS3ExpressCredentialsProvider) getCacheCredentials(bucket string) (*aws.Credentials, bool) { + if v, ok := p.credsCache.Get(bucket); ok { + return v.(*aws.Credentials), true + } + + return nil, false +} + +func (p *defaultS3ExpressCredentialsProvider) putCacheCredentials(bucket string, creds *aws.Credentials) { + p.credsCache.Put(bucket, creds) +} + +func credentialsFromResponse(o *CreateSessionOutput) (*aws.Credentials, error) { + if o.Credentials == nil { + return nil, errors.New("s3express session credentials unset") + } + + if o.Credentials.AccessKeyId == nil || o.Credentials.SecretAccessKey == nil || o.Credentials.SessionToken == nil || o.Credentials.Expiration == nil { + return nil, errors.New("s3express session credentials missing one or more required fields") + } + + return &aws.Credentials{ + AccessKeyID: *o.Credentials.AccessKeyId, + SecretAccessKey: *o.Credentials.SecretAccessKey, + SessionToken: *o.Credentials.SessionToken, + CanExpire: true, + Expires: *o.Credentials.Expiration, + }, nil +} diff --git a/service/s3/express_resolve.go b/service/s3/express_resolve.go new file mode 100644 index 00000000000..2c357a2df30 --- /dev/null +++ b/service/s3/express_resolve.go @@ -0,0 +1,29 @@ +package s3 + +import ( + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" +) + +// If the caller hasn't provided an S3Express provider, we use our default +// which will grab a reference to the S3 client itself in finalization. 
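+// A provider supplied by the caller on Options.ExpressCredentials is used
+// as-is and is not replaced here.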
+func resolveExpressCredentials(o *Options) { + if o.ExpressCredentials == nil { + o.ExpressCredentials = newDefaultS3ExpressCredentialsProvider() + } +} + +// Config finalizer: if we're using the default S3Express implementation, +// grab a reference to the client for its CreateSession API. +func finalizeExpressCredentials(o *Options, c *Client) { + if p, ok := o.ExpressCredentials.(*defaultS3ExpressCredentialsProvider); ok { + p.client = c + } +} + +// NewFromConfig resolver: pull from opaque sources if it exists. +func resolveDisableExpressAuth(cfg aws.Config, o *Options) { + if v, ok := customizations.ResolveDisableExpressAuth(cfg.ConfigSources); ok { + o.DisableS3ExpressSessionAuth = &v + } +} diff --git a/service/s3/express_test.go b/service/s3/express_test.go new file mode 100644 index 00000000000..1a2f95bef75 --- /dev/null +++ b/service/s3/express_test.go @@ -0,0 +1,174 @@ +package s3 + +import ( + "context" + "sync" + "testing" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/sdk" + "github.com/aws/aws-sdk-go-v2/service/s3/types" +) + +type mockCreateSession struct { + wg sync.WaitGroup + + calls []mockCreateSessionCall + times int +} + +type mockCreateSessionCall struct { + output *CreateSessionOutput + err error +} + +func (m *mockCreateSession) expectCalled(t *testing.T, times int) { + if m.times != times { + t.Errorf("expected %d calls to CreateSession, got %d", times, m.times) + } +} + +func (m *mockCreateSession) CreateSession(context.Context, *CreateSessionInput, ...func(*Options)) (*CreateSessionOutput, error) { + defer m.wg.Done() + o := m.calls[m.times] + m.times++ + return o.output, o.err +} + +func TestS3Express_Retrieve(t *testing.T) { + sdk.NowTime = func() time.Time { + return time.Unix(0, 0) + } + mockClient := &mockCreateSession{ + calls: []mockCreateSessionCall{ + { + output: &CreateSessionOutput{ + Credentials: &types.SessionCredentials{ + AccessKeyId: aws.String("AccessKeyId-0"), + Expiration: aws.Time(time.Unix(3600, 0).UTC()), + SecretAccessKey: aws.String("SecretAccessKey-0"), + SessionToken: aws.String("SessionToken-0"), + }, + }, + }, + { + output: &CreateSessionOutput{ + Credentials: &types.SessionCredentials{ + AccessKeyId: aws.String("AccessKeyId-1"), + Expiration: aws.Time(time.Unix(7200, 0).UTC()), + SecretAccessKey: aws.String("SecretAccessKey-1"), + SessionToken: aws.String("SessionToken-1"), + }, + }, + }, + }, + } + + c := newDefaultS3ExpressCredentialsProvider() + c.client = mockClient + + mockClient.wg.Add(3) + c0, err := c.Retrieve(context.Background(), "bucket-0") + if err != nil { + t.Errorf("expected no error, got %v", err) + } + c1, err := c.Retrieve(context.Background(), "bucket-1") + if err != nil { + t.Errorf("expected no error, got %v", err) + } + c2, err := c.Retrieve(context.Background(), "bucket-0") + if err != nil { + t.Errorf("expected no error, got %v", err) + } + + expected0 := aws.Credentials{ + AccessKeyID: "AccessKeyId-0", + SecretAccessKey: "SecretAccessKey-0", + SessionToken: "SessionToken-0", + CanExpire: true, + Expires: time.Unix(3600, 0).UTC(), + } + expected1 := aws.Credentials{ + AccessKeyID: "AccessKeyId-1", + SecretAccessKey: "SecretAccessKey-1", + SessionToken: "SessionToken-1", + CanExpire: true, + Expires: time.Unix(7200, 0).UTC(), + } + + // one should have been a cache hit + mockClient.expectCalled(t, 2) + if expected0 != c0 { + t.Errorf("expected credentials %v, got %v", expected0, c0) + } + if expected0 != c2 { + t.Errorf("expected credentials %v, got %v", expected0, c2) 
+ } + if expected1 != c1 { + t.Errorf("expected credentials %v, got %v", expected1, c1) + } +} + +func TestS3Express_AsyncRefresh(t *testing.T) { + sdk.NowTime = func() time.Time { + return time.Unix(0, 0) + } + mockClient := &mockCreateSession{ + calls: []mockCreateSessionCall{ + { + output: &CreateSessionOutput{ + Credentials: &types.SessionCredentials{ + AccessKeyId: aws.String("AccessKeyId-0"), + Expiration: aws.Time(time.Unix(30, 0).UTC()), + SecretAccessKey: aws.String("SecretAccessKey-0"), + SessionToken: aws.String("SessionToken-0"), + }, + }, + }, + { + output: &CreateSessionOutput{ + Credentials: &types.SessionCredentials{ + AccessKeyId: aws.String("AccessKeyId-0"), + Expiration: aws.Time(time.Unix(3600, 0).UTC()), + SecretAccessKey: aws.String("SecretAccessKey-0"), + SessionToken: aws.String("SessionToken-0"), + }, + }, + }, + }, + } + + c := newDefaultS3ExpressCredentialsProvider() + c.client = mockClient + + mockClient.wg.Add(2) + c0, err := c.Retrieve(context.Background(), "bucket-0") + if err != nil { + t.Errorf("expected no error, got %v", err) + } + c1, err := c.Retrieve(context.Background(), "bucket-0") + if err != nil { + t.Errorf("expected no error, got %v", err) + } + mockClient.wg.Wait() // block on the async retrieve that we've now triggered + + // the first set should still be returned the 2nd time since it's still valid + expected := aws.Credentials{ + AccessKeyID: "AccessKeyId-0", + SecretAccessKey: "SecretAccessKey-0", + SessionToken: "SessionToken-0", + CanExpire: true, + Expires: time.Unix(30, 0).UTC(), + } + + // 2nd call should happen due to refresh window + mockClient.expectCalled(t, 2) + + if expected != c0 { + t.Errorf("expected credentials %v, got %v", expected, c0) + } + if expected != c1 { + t.Errorf("expected credentials %v, got %v", expected, c1) + } +} diff --git a/service/s3/generated.json b/service/s3/generated.json index cdc6f0f1b99..4e666764fb4 100644 --- a/service/s3/generated.json +++ b/service/s3/generated.json @@ -20,6 +20,7 @@ "api_op_CopyObject.go", "api_op_CreateBucket.go", "api_op_CreateMultipartUpload.go", + "api_op_CreateSession.go", "api_op_DeleteBucket.go", "api_op_DeleteBucketAnalyticsConfiguration.go", "api_op_DeleteBucketCors.go", @@ -73,6 +74,7 @@ "api_op_ListBucketInventoryConfigurations.go", "api_op_ListBucketMetricsConfigurations.go", "api_op_ListBuckets.go", + "api_op_ListDirectoryBuckets.go", "api_op_ListMultipartUploads.go", "api_op_ListObjectVersions.go", "api_op_ListObjects.go", diff --git a/service/s3/internal/customizations/context.go b/service/s3/internal/customizations/context.go new file mode 100644 index 00000000000..91b8fde0d74 --- /dev/null +++ b/service/s3/internal/customizations/context.go @@ -0,0 +1,21 @@ +package customizations + +import ( + "context" + + "github.com/aws/smithy-go/middleware" +) + +type bucketKey struct{} + +// SetBucket stores a bucket name within the request context, which is required +// for a variety of custom S3 behaviors. +func SetBucket(ctx context.Context, bucket string) context.Context { + return middleware.WithStackValue(ctx, bucketKey{}, bucket) +} + +// GetBucket retrieves a stored bucket name within a context. 
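+// An empty string is returned when no bucket has been stored in the context.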
+func GetBucket(ctx context.Context) string { + v, _ := middleware.GetStackValue(ctx, bucketKey{}).(string) + return v +} diff --git a/service/s3/internal/customizations/express.go b/service/s3/internal/customizations/express.go new file mode 100644 index 00000000000..8cc0b36248c --- /dev/null +++ b/service/s3/internal/customizations/express.go @@ -0,0 +1,44 @@ +package customizations + +import ( + "context" + "fmt" + + "github.com/aws/aws-sdk-go-v2/aws" + internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" + "github.com/aws/smithy-go" + "github.com/aws/smithy-go/auth" +) + +// S3ExpressCredentialsProvider retrieves credentials for the S3Express storage +// class. +type S3ExpressCredentialsProvider interface { + Retrieve(ctx context.Context, bucket string) (aws.Credentials, error) +} + +// ExpressIdentityResolver retrieves identity for the S3Express storage class. +type ExpressIdentityResolver struct { + Provider S3ExpressCredentialsProvider +} + +var _ (auth.IdentityResolver) = (*ExpressIdentityResolver)(nil) + +// GetIdentity retrieves AWS credentials using the underlying provider. +func (v *ExpressIdentityResolver) GetIdentity(ctx context.Context, props smithy.Properties) ( + auth.Identity, error, +) { + bucket, ok := GetIdentityPropertiesBucket(&props) + if !ok { + bucket = GetBucket(ctx) + } + if bucket == "" { + return nil, fmt.Errorf("bucket name is missing") + } + + creds, err := v.Provider.Retrieve(ctx, bucket) + if err != nil { + return nil, fmt.Errorf("get credentials: %v", err) + } + + return &internalauthsmithy.CredentialsAdapter{Credentials: creds}, nil +} diff --git a/service/s3/internal/customizations/express_config.go b/service/s3/internal/customizations/express_config.go new file mode 100644 index 00000000000..bb22d3474d6 --- /dev/null +++ b/service/s3/internal/customizations/express_config.go @@ -0,0 +1,18 @@ +package customizations + +type s3DisableExpressAuthProvider interface { + GetS3DisableExpressAuth() (bool, bool) +} + +// ResolveDisableExpressAuth pulls S3DisableExpressAuth setting from config +// sources. 
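+// The first config source that reports the setting wins; exists is false
+// when no source provides it.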
+func ResolveDisableExpressAuth(configs []interface{}) (value bool, exists bool) { + for _, cfg := range configs { + if p, ok := cfg.(s3DisableExpressAuthProvider); ok { + if value, exists = p.GetS3DisableExpressAuth(); exists { + break + } + } + } + return +} diff --git a/service/s3/internal/customizations/express_default_checksum.go b/service/s3/internal/customizations/express_default_checksum.go new file mode 100644 index 00000000000..cf3ff5966c9 --- /dev/null +++ b/service/s3/internal/customizations/express_default_checksum.go @@ -0,0 +1,42 @@ +package customizations + +import ( + "context" + "fmt" + + ictx "github.com/aws/aws-sdk-go-v2/internal/context" + "github.com/aws/aws-sdk-go-v2/service/internal/checksum" + "github.com/aws/smithy-go/middleware" +) + +type expressDefaultChecksumMiddleware struct{} + +func (*expressDefaultChecksumMiddleware) ID() string { + return "expressDefaultChecksum" +} + +func (*expressDefaultChecksumMiddleware) HandleFinalize( + ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, +) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + if ictx.GetS3Backend(ctx) == ictx.S3BackendS3Express && ictx.GetChecksumInputAlgorithm(ctx) == "" { + ctx = ictx.SetChecksumInputAlgorithm(ctx, string(checksum.AlgorithmCRC32)) + } + return next.HandleFinalize(ctx, in) +} + +// AddExpressDefaultChecksumMiddleware appends a step to default to CRC32 for +// S3Express requests. This should only be applied to operations where a +// checksum is required (e.g. DeleteObject). +func AddExpressDefaultChecksumMiddleware(s *middleware.Stack) error { + err := s.Finalize.Insert( + &expressDefaultChecksumMiddleware{}, + "AWSChecksum:ComputeInputPayloadChecksum", + middleware.Before, + ) + if err != nil { + return fmt.Errorf("add expressDefaultChecksum: %v", err) + } + return nil +} diff --git a/service/s3/internal/customizations/express_properties.go b/service/s3/internal/customizations/express_properties.go new file mode 100644 index 00000000000..171de461305 --- /dev/null +++ b/service/s3/internal/customizations/express_properties.go @@ -0,0 +1,21 @@ +package customizations + +import "github.com/aws/smithy-go" + +// GetPropertiesBackend returns a resolved endpoint backend from the property +// set. +func GetPropertiesBackend(p *smithy.Properties) string { + v, _ := p.Get("backend").(string) + return v +} + +// GetIdentityPropertiesBucket returns the S3 bucket from identity properties. +func GetIdentityPropertiesBucket(ip *smithy.Properties) (string, bool) { + v, ok := ip.Get(bucketKey{}).(string) + return v, ok +} + +// SetIdentityPropertiesBucket sets the S3 bucket to identity properties. 
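+// The stored value is read back by ExpressIdentityResolver.GetIdentity via
+// GetIdentityPropertiesBucket.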
+func SetIdentityPropertiesBucket(ip *smithy.Properties, bucket string) { + ip.Set(bucketKey{}, bucket) +} diff --git a/service/s3/internal/customizations/express_signer.go b/service/s3/internal/customizations/express_signer.go new file mode 100644 index 00000000000..545e5b220d5 --- /dev/null +++ b/service/s3/internal/customizations/express_signer.go @@ -0,0 +1,109 @@ +package customizations + +import ( + "context" + "net/http" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" +) + +const ( + s3ExpressSignerVersion = "com.amazonaws.s3#sigv4express" + headerAmzSessionToken = "x-amz-s3session-token" +) + +// adapts a v4 signer for S3Express +type s3ExpressSignerAdapter struct { + v4 v4.HTTPSigner +} + +// SignHTTP performs S3Express signing on a request, which is identical to +// SigV4 signing save for an additional header containing the S3Express +// session token. +func (s *s3ExpressSignerAdapter) SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*v4.SignerOptions)) error { + r.Header.Set(headerAmzSessionToken, credentials.SessionToken) + optFns = append(optFns, func(o *v4.SignerOptions) { + o.DisableSessionToken = true + }) + return s.v4.SignHTTP(ctx, credentials, r, payloadHash, service, region, signingTime, optFns...) +} + +// adapts S3ExpressCredentialsProvider to the standard AWS +// CredentialsProvider interface +type s3ExpressCredentialsAdapter struct { + provider S3ExpressCredentialsProvider + bucket string +} + +func (c *s3ExpressCredentialsAdapter) Retrieve(ctx context.Context) (aws.Credentials, error) { + return c.provider.Retrieve(ctx, c.bucket) +} + +// S3ExpressSignHTTPRequestMiddleware signs S3 S3Express requests. +// +// This is NOT mutually exclusive with existing v4 or v4a signer handling on +// the stack itself, but only one handler will actually perform signing based +// on the provided signing version in the context. +type S3ExpressSignHTTPRequestMiddleware struct { + Credentials S3ExpressCredentialsProvider + Signer v4.HTTPSigner + LogSigning bool +} + +// ID identifies S3ExpressSignHTTPRequestMiddleware. +func (*S3ExpressSignHTTPRequestMiddleware) ID() string { + return "S3ExpressSigning" +} + +// HandleFinalize will sign the request if the S3Express signer has been +// selected. 
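+// Requests carrying any other signer version are passed through to the next
+// handler and are not signed by this middleware.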
+func (m *S3ExpressSignHTTPRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + if GetSignerVersion(ctx) != s3ExpressSignerVersion { + return next.HandleFinalize(ctx, in) + } + + mw := v4.NewSignHTTPRequestMiddleware(v4.SignHTTPRequestMiddlewareOptions{ + CredentialsProvider: m.credentialsAdapter(ctx), + Signer: m.signerAdapter(), + LogSigning: m.LogSigning, + }) + return mw.HandleFinalize(ctx, in, next) +} + +func (m *S3ExpressSignHTTPRequestMiddleware) credentialsAdapter(ctx context.Context) aws.CredentialsProvider { + return &s3ExpressCredentialsAdapter{ + provider: m.Credentials, + bucket: GetBucket(ctx), + } +} + +func (m *S3ExpressSignHTTPRequestMiddleware) signerAdapter() v4.HTTPSigner { + return &s3ExpressSignerAdapter{v4: m.Signer} +} + +type s3ExpressPresignerAdapter struct { + v4 v4.HTTPPresigner +} + +// PresignHTTP performs S3Express presigning on a request, which is identical to +// SigV4 presigning save for an additional header containing the S3Express +// session token. +func (s *s3ExpressPresignerAdapter) PresignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*v4.SignerOptions)) ( + string, http.Header, error, +) { + r.Header.Set(headerAmzSessionToken, credentials.SessionToken) + optFns = append(optFns, func(o *v4.SignerOptions) { + o.DisableSessionToken = true + }) + return s.v4.PresignHTTP(ctx, credentials, r, payloadHash, service, region, signingTime, optFns...) +} + +var ( + _ aws.CredentialsProvider = &s3ExpressCredentialsAdapter{} + _ v4.HTTPSigner = &s3ExpressSignerAdapter{} +) diff --git a/service/s3/internal/customizations/express_signer_smithy.go b/service/s3/internal/customizations/express_signer_smithy.go new file mode 100644 index 00000000000..e3ec7f01106 --- /dev/null +++ b/service/s3/internal/customizations/express_signer_smithy.go @@ -0,0 +1,61 @@ +package customizations + +import ( + "context" + "fmt" + + v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" + "github.com/aws/aws-sdk-go-v2/internal/sdk" + "github.com/aws/smithy-go" + "github.com/aws/smithy-go/auth" + "github.com/aws/smithy-go/logging" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// ExpressSigner signs requests for the sigv4-s3express auth scheme. +// +// This signer respects the aws.auth#sigv4 properties for signing name and +// region. +type ExpressSigner struct { + Signer v4.HTTPSigner + Logger logging.Logger + LogSigning bool +} + +var _ (smithyhttp.Signer) = (*ExpressSigner)(nil) + +// SignRequest signs the request with the provided identity.
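+// The identity must be an internalauthsmithy.CredentialsAdapter carrying
+// S3Express session credentials. The session token is attached on the
+// x-amz-s3session-token header and the standard SigV4 security token header
+// is suppressed via DisableSessionToken.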
+func (v *ExpressSigner) SignRequest(ctx context.Context, r *smithyhttp.Request, identity auth.Identity, props smithy.Properties) error { + ca, ok := identity.(*internalauthsmithy.CredentialsAdapter) + if !ok { + return fmt.Errorf("unexpected identity type: %T", identity) + } + + name, ok := smithyhttp.GetSigV4SigningName(&props) + if !ok { + return fmt.Errorf("sigv4 signing name is required for s3express variant") + } + + region, ok := smithyhttp.GetSigV4SigningRegion(&props) + if !ok { + return fmt.Errorf("sigv4 signing region is required for s3express variant") + } + + hash := v4.GetPayloadHash(ctx) + + r.Header.Set(headerAmzSessionToken, ca.Credentials.SessionToken) + err := v.Signer.SignHTTP(ctx, ca.Credentials, r.Request, hash, name, region, sdk.NowTime(), func(o *v4.SignerOptions) { + o.DisableSessionToken = true + + o.DisableURIPathEscaping, _ = smithyhttp.GetDisableDoubleEncoding(&props) + + o.Logger = v.Logger + o.LogSigning = v.LogSigning + }) + if err != nil { + return fmt.Errorf("sign http: %v", err) + } + + return nil +} diff --git a/service/s3/internal/customizations/signer_wrapper.go b/service/s3/internal/customizations/signer_wrapper.go index c79a5aba4f9..cc2bf9c1313 100644 --- a/service/s3/internal/customizations/signer_wrapper.go +++ b/service/s3/internal/customizations/signer_wrapper.go @@ -77,15 +77,21 @@ func (s *SignHTTPRequestMiddleware) ID() string { return "Signing" } -// HandleFinalize will take the provided input and sign the request using the SigV4 authentication scheme +// HandleFinalize will take the provided input and handle signing for either +// SigV4 or SigV4A as called for. func (s *SignHTTPRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( out middleware.FinalizeOutput, metadata middleware.Metadata, err error, ) { - // fetch signer type from context - signerVersion := GetSignerVersion(ctx) + sv := GetSignerVersion(ctx) - // SigV4a - if strings.EqualFold(signerVersion, v4a.Version) { + if strings.EqualFold(sv, v4.Version) { + mw := v4.NewSignHTTPRequestMiddleware(v4.SignHTTPRequestMiddlewareOptions{ + CredentialsProvider: s.credentialsProvider, + Signer: s.v4Signer, + LogSigning: s.logSigning, + }) + return mw.HandleFinalize(ctx, in, next) + } else if strings.EqualFold(sv, v4a.Version) { v4aCredentialProvider, ok := s.credentialsProvider.(v4a.CredentialsProvider) if !ok { return out, metadata, fmt.Errorf("invalid credential-provider provided for sigV4a Signer") @@ -98,13 +104,8 @@ func (s *SignHTTPRequestMiddleware) HandleFinalize(ctx context.Context, in middl }) return mw.HandleFinalize(ctx, in, next) } - // SigV4 - mw := v4.NewSignHTTPRequestMiddleware(v4.SignHTTPRequestMiddlewareOptions{ - CredentialsProvider: s.credentialsProvider, - Signer: s.v4Signer, - LogSigning: s.logSigning, - }) - return mw.HandleFinalize(ctx, in, next) + + return next.HandleFinalize(ctx, in) } // RegisterSigningMiddleware registers the wrapper signing middleware to the stack. If a signing middleware is already @@ -124,6 +125,7 @@ func RegisterSigningMiddleware(stack *middleware.Stack, signingMiddleware *SignH // PresignHTTPRequestMiddlewareOptions is the options for the PresignHTTPRequestMiddleware middleware. 
type PresignHTTPRequestMiddlewareOptions struct { CredentialsProvider aws.CredentialsProvider + ExpressCredentials S3ExpressCredentialsProvider V4Presigner v4.HTTPPresigner V4aPresigner v4a.HTTPPresigner LogSigning bool @@ -139,6 +141,9 @@ type PresignHTTPRequestMiddleware struct { // cred provider and signer for sigv4 credentialsProvider aws.CredentialsProvider + // s3Express credentials + expressCredentials S3ExpressCredentialsProvider + // sigV4 signer v4Signer v4.HTTPPresigner @@ -153,6 +158,7 @@ type PresignHTTPRequestMiddleware struct { func NewPresignHTTPRequestMiddleware(options PresignHTTPRequestMiddlewareOptions) *PresignHTTPRequestMiddleware { return &PresignHTTPRequestMiddleware{ credentialsProvider: options.CredentialsProvider, + expressCredentials: options.ExpressCredentials, v4Signer: options.V4Presigner, v4aSigner: options.V4aPresigner, logSigning: options.LogSigning, @@ -187,7 +193,6 @@ func (p *PresignHTTPRequestMiddleware) HandleFinalize( LogSigning: p.logSigning, }) return mw.HandleFinalize(ctx, in, next) - case "aws.auth#sigv4": mw := v4.NewPresignHTTPRequestMiddleware(v4.PresignHTTPRequestMiddlewareOptions{ CredentialsProvider: p.credentialsProvider, @@ -195,6 +200,16 @@ func (p *PresignHTTPRequestMiddleware) HandleFinalize( LogSigning: p.logSigning, }) return mw.HandleFinalize(ctx, in, next) + case s3ExpressSignerVersion: + mw := v4.NewPresignHTTPRequestMiddleware(v4.PresignHTTPRequestMiddlewareOptions{ + CredentialsProvider: &s3ExpressCredentialsAdapter{ + provider: p.expressCredentials, + bucket: GetBucket(ctx), + }, + Presigner: &s3ExpressPresignerAdapter{v4: p.v4Signer}, + LogSigning: p.logSigning, + }) + return mw.HandleFinalize(ctx, in, next) default: return out, metadata, fmt.Errorf("unsupported signer type \"%s\"", signerVersion) } diff --git a/service/s3/internal/customizations/update_endpoint_test.go b/service/s3/internal/customizations/update_endpoint_test.go index 259a871c25b..b8c5ef1c4dc 100644 --- a/service/s3/internal/customizations/update_endpoint_test.go +++ b/service/s3/internal/customizations/update_endpoint_test.go @@ -1004,7 +1004,7 @@ func TestVPC_CustomEndpoint(t *testing.T) { operation: func(ctx context.Context, svc *s3.Client, fm *requestRetriever) (interface{}, error) { return svc.ListBuckets(ctx, &s3.ListBucketsInput{}, addRequestRetriever(fm)) }, - expectedReqURL: "https://bucket.vpce-123-abc.s3.us-west-2.vpce.amazonaws.com/", + expectedReqURL: "https://bucket.vpce-123-abc.s3.us-west-2.vpce.amazonaws.com/?x-id=ListBuckets", expectedSigningName: "s3", expectedSigningRegion: "us-west-2", }, diff --git a/service/s3/options.go b/service/s3/options.go index 5afb64d635c..a63d04a134c 100644 --- a/service/s3/options.go +++ b/service/s3/options.go @@ -9,6 +9,7 @@ import ( awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" "github.com/aws/aws-sdk-go-v2/internal/v4a" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" smithyauth "github.com/aws/smithy-go/auth" "github.com/aws/smithy-go/logging" "github.com/aws/smithy-go/middleware" @@ -52,6 +53,10 @@ type Options struct { // Allows you to disable S3 Multi-Region access points feature. DisableMultiRegionAccessPoints bool + // Disables this client's usage of Session Auth for S3Express buckets and reverts + // to using conventional SigV4 for those. + DisableS3ExpressSessionAuth *bool + // The endpoint options to be used when attempting to resolve an endpoint. 
EndpointOptions EndpointResolverOptions @@ -68,6 +73,9 @@ type Options struct { // used over the deprecated EndpointResolver. EndpointResolverV2 EndpointResolverV2 + // The credentials provider for S3Express requests. + ExpressCredentials ExpressCredentialsProvider + // Signature Version 4 (SigV4) Signer HTTPSignerV4 HTTPSignerV4 @@ -164,6 +172,9 @@ func (o Options) GetIdentityResolver(schemeID string) smithyauth.IdentityResolve if schemeID == "aws.auth#sigv4" { return getSigV4IdentityResolver(o) } + if schemeID == "com.amazonaws.s3#sigv4express" { + return getExpressIdentityResolver(o) + } if schemeID == "aws.auth#sigv4a" { return getSigV4AIdentityResolver(o) } @@ -296,3 +307,10 @@ func ignoreAnonymousAuth(options *Options) { options.Credentials = nil } } + +func getExpressIdentityResolver(o Options) smithyauth.IdentityResolver { + if o.ExpressCredentials != nil { + return &s3cust.ExpressIdentityResolver{Provider: o.ExpressCredentials} + } + return nil +} diff --git a/service/s3/serializers.go b/service/s3/serializers.go index bae7f32e249..59524bdcbd6 100644 --- a/service/s3/serializers.go +++ b/service/s3/serializers.go @@ -828,6 +828,67 @@ func awsRestxml_serializeOpHttpBindingsCreateMultipartUploadInput(v *CreateMulti return nil } +type awsRestxml_serializeOpCreateSession struct { +} + +func (*awsRestxml_serializeOpCreateSession) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpCreateSession) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*CreateSessionInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/?session") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsCreateSessionInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsCreateSessionInput(v *CreateSessionInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if len(v.SessionMode) > 0 { + locationName := "X-Amz-Create-Session-Mode" + encoder.SetHeader(locationName).String(string(v.SessionMode)) + } + + return nil +} + type awsRestxml_serializeOpDeleteBucket struct { } @@ -4445,7 +4506,7 @@ func (m 
*awsRestxml_serializeOpListBuckets) HandleSerialize(ctx context.Context, return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/") + opPath, opQuery := httpbinding.SplitURI("/?x-id=ListBuckets") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "GET" @@ -4476,6 +4537,70 @@ func awsRestxml_serializeOpHttpBindingsListBucketsInput(v *ListBucketsInput, enc return nil } +type awsRestxml_serializeOpListDirectoryBuckets struct { +} + +func (*awsRestxml_serializeOpListDirectoryBuckets) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpListDirectoryBuckets) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListDirectoryBucketsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/?x-id=ListDirectoryBuckets") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsListDirectoryBucketsInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsListDirectoryBucketsInput(v *ListDirectoryBucketsInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.ContinuationToken != nil { + encoder.SetQuery("continuation-token").String(*v.ContinuationToken) + } + + if v.MaxDirectoryBuckets != nil { + encoder.SetQuery("max-directory-buckets").Integer(*v.MaxDirectoryBuckets) + } + + return nil +} + type awsRestxml_serializeOpListMultipartUploads struct { } @@ -8765,6 +8890,33 @@ func awsRestxml_serializeDocumentAnalyticsS3BucketDestination(v *types.Analytics return nil } +func awsRestxml_serializeDocumentBucketInfo(v *types.BucketInfo, value smithyxml.Value) error { + defer value.Close() + if len(v.DataRedundancy) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "DataRedundancy", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.DataRedundancy)) + } + if len(v.Type) > 0 { + rootAttr := []smithyxml.Attr{} + root 
:= smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Type", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.Type)) + } + return nil +} + func awsRestxml_serializeDocumentBucketLifecycleConfiguration(v *types.BucketLifecycleConfiguration, value smithyxml.Value) error { defer value.Close() if v.Rules != nil { @@ -9046,6 +9198,32 @@ func awsRestxml_serializeDocumentCORSRules(v []types.CORSRule, value smithyxml.V func awsRestxml_serializeDocumentCreateBucketConfiguration(v *types.CreateBucketConfiguration, value smithyxml.Value) error { defer value.Close() + if v.Bucket != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Bucket", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentBucketInfo(v.Bucket, el); err != nil { + return err + } + } + if v.Location != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Location", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentLocationInfo(v.Location, el); err != nil { + return err + } + } if len(v.LocationConstraint) > 0 { rootAttr := []smithyxml.Attr{} root := smithyxml.StartElement{ @@ -10493,6 +10671,33 @@ func awsRestxml_serializeDocumentLifecycleRules(v []types.LifecycleRule, value s return nil } +func awsRestxml_serializeDocumentLocationInfo(v *types.LocationInfo, value smithyxml.Value) error { + defer value.Close() + if v.Name != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Name", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.Name) + } + if len(v.Type) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Type", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.Type)) + } + return nil +} + func awsRestxml_serializeDocumentLoggingEnabled(v *types.LoggingEnabled, value smithyxml.Value) error { defer value.Close() if v.TargetBucket != nil { diff --git a/service/s3/types/enums.go b/service/s3/types/enums.go index 60a0fd2d76c..ea3b9c82acc 100644 --- a/service/s3/types/enums.go +++ b/service/s3/types/enums.go @@ -86,6 +86,7 @@ const ( BucketLocationConstraintApNortheast2 BucketLocationConstraint = "ap-northeast-2" BucketLocationConstraintApNortheast3 BucketLocationConstraint = "ap-northeast-3" BucketLocationConstraintApSouth1 BucketLocationConstraint = "ap-south-1" + BucketLocationConstraintApSouth2 BucketLocationConstraint = "ap-south-2" BucketLocationConstraintApSoutheast1 BucketLocationConstraint = "ap-southeast-1" BucketLocationConstraintApSoutheast2 BucketLocationConstraint = "ap-southeast-2" BucketLocationConstraintApSoutheast3 BucketLocationConstraint = "ap-southeast-3" @@ -96,6 +97,7 @@ const ( BucketLocationConstraintEuCentral1 BucketLocationConstraint = "eu-central-1" BucketLocationConstraintEuNorth1 BucketLocationConstraint = "eu-north-1" BucketLocationConstraintEuSouth1 BucketLocationConstraint = "eu-south-1" + BucketLocationConstraintEuSouth2 BucketLocationConstraint = "eu-south-2" BucketLocationConstraintEuWest1 BucketLocationConstraint = "eu-west-1" BucketLocationConstraintEuWest2 BucketLocationConstraint = "eu-west-2" BucketLocationConstraintEuWest3 BucketLocationConstraint = "eu-west-3" @@ -106,8 +108,6 @@ const ( BucketLocationConstraintUsGovWest1 BucketLocationConstraint = "us-gov-west-1" 
BucketLocationConstraintUsWest1 BucketLocationConstraint = "us-west-1" BucketLocationConstraintUsWest2 BucketLocationConstraint = "us-west-2" - BucketLocationConstraintApSouth2 BucketLocationConstraint = "ap-south-2" - BucketLocationConstraintEuSouth2 BucketLocationConstraint = "eu-south-2" ) // Values returns all known values for BucketLocationConstraint. Note that this @@ -121,6 +121,7 @@ func (BucketLocationConstraint) Values() []BucketLocationConstraint { "ap-northeast-2", "ap-northeast-3", "ap-south-1", + "ap-south-2", "ap-southeast-1", "ap-southeast-2", "ap-southeast-3", @@ -131,6 +132,7 @@ func (BucketLocationConstraint) Values() []BucketLocationConstraint { "eu-central-1", "eu-north-1", "eu-south-1", + "eu-south-2", "eu-west-1", "eu-west-2", "eu-west-3", @@ -141,8 +143,6 @@ func (BucketLocationConstraint) Values() []BucketLocationConstraint { "us-gov-west-1", "us-west-1", "us-west-2", - "ap-south-2", - "eu-south-2", } } @@ -166,6 +166,22 @@ func (BucketLogsPermission) Values() []BucketLogsPermission { } } +type BucketType string + +// Enum values for BucketType +const ( + BucketTypeDirectory BucketType = "Directory" +) + +// Values returns all known values for BucketType. Note that this can be expanded +// in the future, and so it is only as up to date as the client. The ordering of +// this slice is not guaranteed to be stable across updates. +func (BucketType) Values() []BucketType { + return []BucketType{ + "Directory", + } +} + type BucketVersioningStatus string // Enum values for BucketVersioningStatus @@ -242,6 +258,22 @@ func (CompressionType) Values() []CompressionType { } } +type DataRedundancy string + +// Enum values for DataRedundancy +const ( + DataRedundancySingleAvailabilityZone DataRedundancy = "SingleAvailabilityZone" +) + +// Values returns all known values for DataRedundancy. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (DataRedundancy) Values() []DataRedundancy { + return []DataRedundancy{ + "SingleAvailabilityZone", + } +} + type DeleteMarkerReplicationStatus string // Enum values for DeleteMarkerReplicationStatus @@ -592,6 +624,22 @@ func (JSONType) Values() []JSONType { } } +type LocationType string + +// Enum values for LocationType +const ( + LocationTypeAvailabilityZone LocationType = "AvailabilityZone" +) + +// Values returns all known values for LocationType. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (LocationType) Values() []LocationType { + return []LocationType{ + "AvailabilityZone", + } +} + type MetadataDirective string // Enum values for MetadataDirective @@ -820,6 +868,7 @@ const ( ObjectStorageClassOutposts ObjectStorageClass = "OUTPOSTS" ObjectStorageClassGlacierIr ObjectStorageClass = "GLACIER_IR" ObjectStorageClassSnow ObjectStorageClass = "SNOW" + ObjectStorageClassExpressOnezone ObjectStorageClass = "EXPRESS_ONEZONE" ) // Values returns all known values for ObjectStorageClass. 
Note that this can be @@ -837,6 +886,7 @@ func (ObjectStorageClass) Values() []ObjectStorageClass { "OUTPOSTS", "GLACIER_IR", "SNOW", + "EXPRESS_ONEZONE", } } @@ -1130,6 +1180,24 @@ func (ServerSideEncryption) Values() []ServerSideEncryption { } } +type SessionMode string + +// Enum values for SessionMode +const ( + SessionModeReadOnly SessionMode = "ReadOnly" + SessionModeReadWrite SessionMode = "ReadWrite" +) + +// Values returns all known values for SessionMode. Note that this can be expanded +// in the future, and so it is only as up to date as the client. The ordering of +// this slice is not guaranteed to be stable across updates. +func (SessionMode) Values() []SessionMode { + return []SessionMode{ + "ReadOnly", + "ReadWrite", + } +} + type SseKmsEncryptedObjectsStatus string // Enum values for SseKmsEncryptedObjectsStatus @@ -1163,6 +1231,7 @@ const ( StorageClassOutposts StorageClass = "OUTPOSTS" StorageClassGlacierIr StorageClass = "GLACIER_IR" StorageClassSnow StorageClass = "SNOW" + StorageClassExpressOnezone StorageClass = "EXPRESS_ONEZONE" ) // Values returns all known values for StorageClass. Note that this can be @@ -1180,6 +1249,7 @@ func (StorageClass) Values() []StorageClass { "OUTPOSTS", "GLACIER_IR", "SNOW", + "EXPRESS_ONEZONE", } } diff --git a/service/s3/types/errors.go b/service/s3/types/errors.go index f3837866d10..166484f4ec2 100644 --- a/service/s3/types/errors.go +++ b/service/s3/types/errors.go @@ -64,7 +64,14 @@ func (e *BucketAlreadyOwnedByYou) ErrorCode() string { } func (e *BucketAlreadyOwnedByYou) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } -// Object is archived and inaccessible until restored. +// Object is archived and inaccessible until restored. If the object you are +// retrieving is stored in the S3 Glacier Flexible Retrieval storage class, the S3 +// Glacier Deep Archive storage class, the S3 Intelligent-Tiering Archive Access +// tier, or the S3 Intelligent-Tiering Deep Archive Access tier, before you can +// retrieve the object you must first restore a copy using RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html) +// . Otherwise, this operation returns an InvalidObjectState error. For +// information about restoring archived objects, see Restoring Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html) +// in the Amazon S3 User Guide. type InvalidObjectState struct { Message *string diff --git a/service/s3/types/types.go b/service/s3/types/types.go index 26d4687d57c..d3f7593fe76 100644 --- a/service/s3/types/types.go +++ b/service/s3/types/types.go @@ -173,9 +173,7 @@ type AnalyticsS3BucketDestination struct { noSmithyDocumentSerde } -// In terms of implementation, a Bucket is a resource. An Amazon S3 bucket name is -// globally unique, and the namespace is shared by all Amazon Web Services -// accounts. +// In terms of implementation, a Bucket is a resource. type Bucket struct { // Date the bucket was created. This date can change when making changes to your @@ -188,6 +186,21 @@ type Bucket struct { noSmithyDocumentSerde } +// Specifies the information about the bucket that will be created. For more +// information about directory buckets, see Directory buckets (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html) +// in the Amazon S3 User Guide. This functionality is only supported by directory +// buckets. +type BucketInfo struct { + + // The number of Availability Zones used for redundancy for the bucket.
+ DataRedundancy DataRedundancy + + // The type of bucket. + Type BucketType + + noSmithyDocumentSerde +} + // Specifies the lifecycle configuration for objects in an Amazon S3 bucket. For // more information, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) // in the Amazon S3 User Guide. @@ -216,30 +229,38 @@ type BucketLoggingStatus struct { type Checksum struct { // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this may not - // be a checksum value of the object. For more information about how checksums are - // calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use an API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. Instead, it's a calculation based on the + // checksum values of each individual part. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumCRC32 *string // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this may not - // be a checksum value of the object. For more information about how checksums are - // calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use an API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. Instead, it's a calculation based on the + // checksum values of each individual part. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumCRC32C *string // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this may not - // be a checksum value of the object. For more information about how checksums are - // calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use the API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. Instead, it's a calculation based on the + // checksum values of each individual part. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumSHA1 *string // The base64-encoded, 256-bit SHA-256 digest of the object. 
This will only be - // present if it was uploaded with the object. With multipart uploads, this may not - // be a checksum value of the object. For more information about how checksums are - // calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use an API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. Instead, it's a calculation based on the + // checksum values of each individual part. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumSHA256 *string @@ -273,30 +294,38 @@ type CompletedMultipartUpload struct { type CompletedPart struct { // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this may not - // be a checksum value of the object. For more information about how checksums are - // calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use an API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. Instead, it's a calculation based on the + // checksum values of each individual part. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumCRC32 *string // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this may not - // be a checksum value of the object. For more information about how checksums are - // calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use an API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. Instead, it's a calculation based on the + // checksum values of each individual part. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumCRC32C *string // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this may not - // be a checksum value of the object. For more information about how checksums are - // calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. 
When you use the API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. Instead, it's a calculation based on the + // checksum values of each individual part. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumSHA1 *string // The base64-encoded, 256-bit SHA-256 digest of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this may not - // be a checksum value of the object. For more information about how checksums are - // calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use an API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. Instead, it's a calculation based on the + // checksum values of each individual part. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumSHA256 *string @@ -305,6 +334,14 @@ type CompletedPart struct { // Part number that identifies the part. This is a positive integer between 1 and // 10,000. + // - General purpose buckets - In CompleteMultipartUpload , when an additional + // checksum (including x-amz-checksum-crc32 , x-amz-checksum-crc32c , + // x-amz-checksum-sha1 , or x-amz-checksum-sha256 ) is applied to each part, the + // PartNumber must start at 1 and the part numbers must be consecutive. + // Otherwise, Amazon S3 generates an HTTP 400 Bad Request status code and an + // InvalidPartOrder error code. + // - Directory buckets - In CompleteMultipartUpload , the PartNumber must start + // at 1 and the part numbers must be consecutive. PartNumber *int32 noSmithyDocumentSerde @@ -346,30 +383,26 @@ type ContinuationEvent struct { type CopyObjectResult struct { // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this may not - // be a checksum value of the object. For more information about how checksums are - // calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. For more information, see Checking + // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. ChecksumCRC32 *string // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be - // present if it was uploaded with the object.
For more information, see Checking + // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. ChecksumCRC32C *string // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this may not - // be a checksum value of the object. For more information about how checksums are - // calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. For more information, see Checking + // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. ChecksumSHA1 *string // The base64-encoded, 256-bit SHA-256 digest of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this may not - // be a checksum value of the object. For more information about how checksums are - // calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. For more information, see Checking + // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. ChecksumSHA256 *string @@ -387,30 +420,38 @@ type CopyObjectResult struct { type CopyPartResult struct { // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this may not - // be a checksum value of the object. For more information about how checksums are - // calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use an API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. Instead, it's a calculation based on the + // checksum values of each individual part. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumCRC32 *string // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this may not - // be a checksum value of the object. For more information about how checksums are - // calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use an API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. Instead, it's a calculation based on the + // checksum values of each individual part. 
For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumCRC32C *string // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this may not - // be a checksum value of the object. For more information about how checksums are - // calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use the API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. Instead, it's a calculation based on the + // checksum values of each individual part. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumSHA1 *string // The base64-encoded, 256-bit SHA-256 digest of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this may not - // be a checksum value of the object. For more information about how checksums are - // calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use an API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. Instead, it's a calculation based on the + // checksum values of each individual part. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumSHA256 *string @@ -474,8 +515,23 @@ type CORSRule struct { // The configuration information for the bucket. type CreateBucketConfiguration struct { - // Specifies the Region where the bucket will be created. If you don't specify a - // Region, the bucket is created in the US East (N. Virginia) Region (us-east-1). + // Specifies the information about the bucket that will be created. This + // functionality is only supported by directory buckets. + Bucket *BucketInfo + + // Specifies the location where the bucket will be created. For directory buckets, + // the location type is Availability Zone. This functionality is only supported by + // directory buckets. + Location *LocationInfo + + // Specifies the Region where the bucket will be created. You might choose a + // Region to optimize latency, minimize costs, or address regulatory requirements. + // For example, if you reside in Europe, you will probably find it advantageous to + // create buckets in the Europe (Ireland) Region. For more information, see + // Accessing a bucket (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro) + // in the Amazon S3 User Guide. If you don't specify a Region, the bucket is + // created in the US East (N. 
Virginia) Region (us-east-1) by default. This + // functionality is not supported for directory buckets. LocationConstraint BucketLocationConstraint noSmithyDocumentSerde @@ -579,13 +635,16 @@ type DefaultRetention struct { // Container for the objects to delete. type Delete struct { - // The object to delete. + // The object to delete. Directory buckets - For directory buckets, an object + // that's composed entirely of whitespace characters is not supported by the + // DeleteObjects API operation. The request will receive a 400 Bad Request error + // and none of the objects in the request will be deleted. // // This member is required. Objects []ObjectIdentifier // Element to enable quiet mode for the request. When you add this element, you - // must set its value to true. + // must set its value to true . Quiet *bool noSmithyDocumentSerde @@ -597,18 +656,21 @@ type DeletedObject struct { // Indicates whether the specified object version that was permanently deleted was // (true) or was not (false) a delete marker before deletion. In a simple DELETE, // this header indicates whether (true) or not (false) the current version of the - // object is a delete marker. + // object is a delete marker. This functionality is not supported for directory + // buckets. DeleteMarker *bool // The version ID of the delete marker created as a result of the DELETE // operation. If you delete a specific object version, the value returned by this - // header is the version ID of the object version deleted. + // header is the version ID of the object version deleted. This functionality is + // not supported for directory buckets. DeleteMarkerVersionId *string // The name of the deleted object. Key *string - // The version ID of the deleted object. + // The version ID of the deleted object. This functionality is not supported for + // directory buckets. VersionId *string noSmithyDocumentSerde @@ -1178,7 +1240,8 @@ type Error struct { // error message. Message *string - // The version ID of the error. + // The version ID of the error. This functionality is not supported for directory + // buckets. VersionId *string noSmithyDocumentSerde @@ -1255,6 +1318,13 @@ type GetObjectAttributesParts struct { // A container for elements related to a particular part. A response can contain // zero or more Parts elements. + // - General purpose buckets - For GetObjectAttributes , if an additional checksum + // (including x-amz-checksum-crc32 , x-amz-checksum-crc32c , x-amz-checksum-sha1 + // , or x-amz-checksum-sha256 ) isn't applied to the object specified in the + // request, the response doesn't return Part . + // - Directory buckets - For GetObjectAttributes , no matter whether an additional + // checksum is applied to the object specified in the request, the response returns + // Part . Parts []ObjectPart // The total number of parts. @@ -1342,11 +1412,15 @@ type IndexDocument struct { // Container element that identifies who initiated the multipart upload. type Initiator struct { - // Name of the Principal. + // Name of the Principal. This functionality is not supported for directory + // buckets. DisplayName *string // If the principal is an Amazon Web Services account, it provides the Canonical // User ID. If the principal is an IAM User, it provides a user ARN value. + // Directory buckets - If the principal is an Amazon Web Services account, it + // provides the Amazon Web Services account ID. If the principal is an IAM User, it + // provides a user ARN value.
ID *string noSmithyDocumentSerde @@ -1773,6 +1847,24 @@ type LifecycleRuleFilterMemberTag struct { func (*LifecycleRuleFilterMemberTag) isLifecycleRuleFilter() {} +// Specifies the location where the bucket will be created. For directory buckets, +// the location type is Availability Zone. For more information about directory +// buckets, see Directory buckets (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html) +// in the Amazon S3 User Guide. This functionality is only supported by directory +// buckets. +type LocationInfo struct { + + // The name of the location where the bucket will be created. For directory + // buckets, the AZ ID of the Availability Zone where the bucket will be created. An + // example AZ ID value is usw2-az2 . + Name *string + + // The type of location where the bucket will be created. + Type LocationType + + noSmithyDocumentSerde +} + // Describes where logs are stored and the prefix that Amazon S3 assigns to all // log object keys for a bucket. For more information, see PUT Bucket logging (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html) // in the Amazon S3 API Reference. @@ -1944,9 +2036,13 @@ type MultipartUpload struct { Key *string // Specifies the owner of the object that is part of the multipart upload. + // Directory buckets - The bucket owner is returned as the object owner for all the + // objects. Owner *Owner - // The class of storage used to store the object. + // The class of storage used to store the object. Directory buckets - Only the S3 + // Express One Zone storage class is supported by directory buckets to store + // objects. StorageClass StorageClass // Upload ID that identifies the multipart upload. @@ -2062,6 +2158,7 @@ type Object struct { // encryption. If an object is larger than 16 MB, the Amazon Web Services // Management Console will upload or copy that object as a Multipart Upload, and // therefore the ETag will not be an MD5 digest. + // Directory buckets - MD5 is not supported by directory buckets. ETag *string // The name that you assign to an object. You use the object key to retrieve the @@ -2071,20 +2168,25 @@ type Object struct { // Creation date of the object. LastModified *time.Time - // The owner of the object + // The owner of the object Directory buckets - The bucket owner is returned as the + // object owner. Owner *Owner // Specifies the restoration status of an object. Objects in certain storage // classes must be restored before they can be retrieved. For more information // about these storage classes and how to work with archived objects, see Working // with archived objects (https://docs.aws.amazon.com/AmazonS3/latest/userguide/archived-objects.html) - // in the Amazon S3 User Guide. + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. Only the S3 Express One Zone storage class is supported by directory + // buckets to store objects. RestoreStatus *RestoreStatus // Size in bytes of the object Size *int64 - // The class of storage used to store the object. + // The class of storage used to store the object. Directory buckets - Only the S3 + // Express One Zone storage class is supported by directory buckets to store + // objects. StorageClass ObjectStorageClass noSmithyDocumentSerde @@ -2101,7 +2203,8 @@ type ObjectIdentifier struct { // This member is required. Key *string - // VersionId for the specific version of the object to delete. + // Version ID for the specific version of the object to delete. 
This functionality + // is not supported for directory buckets. VersionId *string noSmithyDocumentSerde @@ -2167,23 +2270,29 @@ type ObjectPart struct { ChecksumCRC32 *string // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this may not - // be a checksum value of the object. For more information about how checksums are - // calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use an API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. Instead, it's a calculation based on the + // checksum values of each individual part. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumCRC32C *string // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this may not - // be a checksum value of the object. For more information about how checksums are - // calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use the API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. Instead, it's a calculation based on the + // checksum values of each individual part. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumSHA1 *string // The base64-encoded, 256-bit SHA-256 digest of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this may not - // be a checksum value of the object. For more information about how checksums are - // calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use an API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. Instead, it's a calculation based on the + // checksum values of each individual part. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumSHA256 *string @@ -2272,6 +2381,7 @@ type Owner struct { // - Asia Pacific (Tokyo) // - Europe (Ireland) // - South America (São Paulo) + // This functionality is not supported for directory buckets. DisplayName *string // Container for the ID of the owner. 
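The new BucketInfo and LocationInfo members serialized earlier in this change feed into CreateBucketConfiguration when a directory bucket is created. The sketch below is illustrative only and is not part of the generated diff: it assumes the usual aws-sdk-go-v2 client setup, and the bucket name and Availability Zone ID are placeholders (the LocationInfo documentation above cites usw2-az2 as an example AZ ID).

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Illustrative values: the bucket name and AZ ID below are placeholders.
	_, err = client.CreateBucket(context.TODO(), &s3.CreateBucketInput{
		Bucket: aws.String("amzn-s3-demo-bucket--usw2-az2--x-s3"),
		CreateBucketConfiguration: &types.CreateBucketConfiguration{
			// Location and Bucket are the directory-bucket fields added in this change.
			Location: &types.LocationInfo{
				Name: aws.String("usw2-az2"),
				Type: types.LocationTypeAvailabilityZone,
			},
			Bucket: &types.BucketInfo{
				DataRedundancy: types.DataRedundancySingleAvailabilityZone,
				Type:           types.BucketTypeDirectory,
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}

For a general purpose bucket, CreateBucketConfiguration continues to carry only LocationConstraint, as before.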
@@ -2302,9 +2412,16 @@ type OwnershipControlsRule struct { // BucketOwnerEnforced - Access control lists (ACLs) are disabled and no longer // affect permissions. The bucket owner automatically owns and has full control // over every object in the bucket. The bucket only accepts PUT requests that don't - // specify an ACL or bucket owner full control ACLs, such as the - // bucket-owner-full-control canned ACL or an equivalent form of this ACL expressed - // in the XML format. + // specify an ACL or specify bucket owner full control ACLs (such as the predefined + // bucket-owner-full-control canned ACL or a custom ACL in XML format that grants + // the same permissions). By default, ObjectOwnership is set to BucketOwnerEnforced + // and ACLs are disabled. We recommend keeping ACLs disabled, except in uncommon + // use cases where you must control access for each object individually. For more + // information about S3 Object Ownership, see Controlling ownership of objects and + // disabling ACLs for your bucket (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. Directory buckets use the bucket owner enforced setting for S3 Object + // Ownership. // // This member is required. ObjectOwnership ObjectOwnership @@ -2328,16 +2445,20 @@ type Part struct { ChecksumCRC32 *string // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this may not - // be a checksum value of the object. For more information about how checksums are - // calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use an API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. Instead, it's a calculation based on the + // checksum values of each individual part. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumCRC32C *string // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this may not - // be a checksum value of the object. For more information about how checksums are - // calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use the API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. Instead, it's a calculation based on the + // checksum values of each individual part. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumSHA1 *string @@ -2805,7 +2926,9 @@ type RestoreRequest struct { // classes must be restored before they can be retrieved. 
For more information // about these storage classes and how to work with archived objects, see Working // with archived objects (https://docs.aws.amazon.com/AmazonS3/latest/userguide/archived-objects.html) -// in the Amazon S3 User Guide. +// in the Amazon S3 User Guide. This functionality is not supported for directory +// buckets. Only the S3 Express One Zone storage class is supported by directory +// buckets to store objects. type RestoreStatus struct { // Specifies whether the object is currently being restored. If the object @@ -3063,6 +3186,43 @@ type ServerSideEncryptionRule struct { noSmithyDocumentSerde } +// The established temporary security credentials of the session. Directory +// buckets - These session credentials are only supported for the authentication +// and authorization of Zonal endpoint APIs on directory buckets. +type SessionCredentials struct { + + // A unique identifier that's associated with a secret access key. The access key + // ID and the secret access key are used together to sign programmatic Amazon Web + // Services requests cryptographically. + // + // This member is required. + AccessKeyId *string + + // Temporary security credentials expire after a specified interval. After + // temporary credentials expire, any calls that you make with those credentials + // will fail. So you must generate a new set of temporary credentials. Temporary + // credentials cannot be extended or refreshed beyond the original specified + // interval. + // + // This member is required. + Expiration *time.Time + + // A key that's used with the access key ID to cryptographically sign programmatic + // Amazon Web Services requests. Signing a request identifies the sender and + // prevents the request from being altered. + // + // This member is required. + SecretAccessKey *string + + // A part of the temporary security credentials. The session token is used to + // validate the temporary security credentials. + // + // This member is required. + SessionToken *string + + noSmithyDocumentSerde +} + // To use simple format for S3 keys for log objects, set SimplePrefix to an empty // object. 
[DestinationPrefix][YYYY]-[MM]-[DD]-[hh]-[mm]-[ss]-[UniqueString] type SimplePrefix struct { diff --git a/service/s3/validators.go b/service/s3/validators.go index ac560f7df4f..e954b302df9 100644 --- a/service/s3/validators.go +++ b/service/s3/validators.go @@ -110,6 +110,26 @@ func (m *validateOpCreateMultipartUpload) HandleInitialize(ctx context.Context, return next.HandleInitialize(ctx, in) } +type validateOpCreateSession struct { +} + +func (*validateOpCreateSession) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpCreateSession) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*CreateSessionInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpCreateSessionInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + type validateOpDeleteBucketAnalyticsConfiguration struct { } @@ -1870,6 +1890,10 @@ func addOpCreateMultipartUploadValidationMiddleware(stack *middleware.Stack) err return stack.Initialize.Add(&validateOpCreateMultipartUpload{}, middleware.After) } +func addOpCreateSessionValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpCreateSession{}, middleware.After) +} + func addOpDeleteBucketAnalyticsConfigurationValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpDeleteBucketAnalyticsConfiguration{}, middleware.After) } @@ -3931,6 +3955,21 @@ func validateOpCreateMultipartUploadInput(v *CreateMultipartUploadInput) error { } } +func validateOpCreateSessionInput(v *CreateSessionInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CreateSessionInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateOpDeleteBucketAnalyticsConfigurationInput(v *DeleteBucketAnalyticsConfigurationInput) error { if v == nil { return nil
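// The CreateSession validator above only requires Bucket; SessionMode is
// optional. The following is a hand-written, illustrative sketch of calling
// the operation directly, not generated code. It assumes that
// CreateSessionOutput exposes the returned SessionCredentials in a
// Credentials field (defined in api_op_CreateSession.go, outside this hunk).
// In typical use the client refreshes these credentials itself through
// Options.ExpressCredentials rather than the caller invoking CreateSession.
//
// package main
//
// import (
// 	"context"
// 	"fmt"
// 	"log"
//
// 	"github.com/aws/aws-sdk-go-v2/aws"
// 	"github.com/aws/aws-sdk-go-v2/config"
// 	"github.com/aws/aws-sdk-go-v2/service/s3"
// 	"github.com/aws/aws-sdk-go-v2/service/s3/types"
// )
//
// func main() {
// 	cfg, err := config.LoadDefaultConfig(context.TODO())
// 	if err != nil {
// 		log.Fatal(err)
// 	}
// 	client := s3.NewFromConfig(cfg)
//
// 	// The bucket name is a placeholder; CreateSession targets a directory bucket.
// 	out, err := client.CreateSession(context.TODO(), &s3.CreateSessionInput{
// 		Bucket:      aws.String("amzn-s3-demo-bucket--usw2-az2--x-s3"),
// 		SessionMode: types.SessionModeReadWrite, // or types.SessionModeReadOnly
// 	})
// 	if err != nil {
// 		log.Fatal(err)
// 	}
//
// 	// Credentials is assumed to be *types.SessionCredentials (AccessKeyId,
// 	// SecretAccessKey, SessionToken, Expiration), matching the type added above.
// 	fmt.Println("session expires at:", out.Credentials.Expiration)
// }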