diff --git a/.changelog/1a34dece0323423db0c8bb503980931a.json b/.changelog/1a34dece0323423db0c8bb503980931a.json new file mode 100644 index 00000000000..928045599e5 --- /dev/null +++ b/.changelog/1a34dece0323423db0c8bb503980931a.json @@ -0,0 +1,8 @@ +{ + "id": "1a34dece-0323-423d-b0c8-bb503980931a", + "type": "feature", + "description": "This release adds new collection type VectorSearch.", + "modules": [ + "service/opensearchserverless" + ] +} \ No newline at end of file diff --git a/.changelog/1f9f9405ad104ce184d1623484ed5126.json b/.changelog/1f9f9405ad104ce184d1623484ed5126.json new file mode 100644 index 00000000000..a12d387b90e --- /dev/null +++ b/.changelog/1f9f9405ad104ce184d1623484ed5126.json @@ -0,0 +1,8 @@ +{ + "id": "1f9f9405-ad10-4ce1-84d1-623484ed5126", + "type": "documentation", + "description": "This release includes general updates to user documentation.", + "modules": [ + "service/mediaconvert" + ] +} \ No newline at end of file diff --git a/.changelog/2b8b45b8e9024a8ba11e1d7538c64275.json b/.changelog/2b8b45b8e9024a8ba11e1d7538c64275.json new file mode 100644 index 00000000000..d8dafcd3e5e --- /dev/null +++ b/.changelog/2b8b45b8e9024a8ba11e1d7538c64275.json @@ -0,0 +1,8 @@ +{ + "id": "2b8b45b8-e902-4a8b-a11e-1d7538c64275", + "type": "feature", + "description": "AWS Entity Resolution can effectively match a source record from a customer relationship management (CRM) system with a source record from a marketing system containing campaign information.", + "modules": [ + "service/entityresolution" + ] +} \ No newline at end of file diff --git a/.changelog/333aec8b3a38471fad87088a1f8405e0.json b/.changelog/333aec8b3a38471fad87088a1f8405e0.json new file mode 100644 index 00000000000..080f38e64e7 --- /dev/null +++ b/.changelog/333aec8b3a38471fad87088a1f8405e0.json @@ -0,0 +1,8 @@ +{ + "id": "333aec8b-3a38-471f-ad87-088a1f8405e0", + "type": "feature", + "description": "Amazon Polly adds 1 new voice - Lisa (nl-BE)", + "modules": [ + "service/polly" + ] +} \ No newline at end of file diff --git a/.changelog/41575353444b40ffbf474f4155544f00.json b/.changelog/41575353444b40ffbf474f4155544f00.json new file mode 100644 index 00000000000..abb54a5ec83 --- /dev/null +++ b/.changelog/41575353444b40ffbf474f4155544f00.json @@ -0,0 +1,9 @@ +{ + "id": "41575353-444b-40ff-bf47-4f4155544f00", + "type": "release", + "description": "New AWS service client module", + "modules": [ + "service/entityresolution", + "service/managedblockchainquery" + ] +} \ No newline at end of file diff --git a/.changelog/5814b067b77a4e7f820578152ea0188c.json b/.changelog/5814b067b77a4e7f820578152ea0188c.json new file mode 100644 index 00000000000..e0c93834116 --- /dev/null +++ b/.changelog/5814b067b77a4e7f820578152ea0188c.json @@ -0,0 +1,8 @@ +{ + "id": "5814b067-b77a-4e7f-8205-78152ea0188c", + "type": "documentation", + "description": "Update that corrects the documents for received feedback.", + "modules": [ + "service/route53" + ] +} \ No newline at end of file diff --git a/.changelog/6df6c4cb34434e59901c7eb2a40c6202.json b/.changelog/6df6c4cb34434e59901c7eb2a40c6202.json new file mode 100644 index 00000000000..7b238fe0cdd --- /dev/null +++ b/.changelog/6df6c4cb34434e59901c7eb2a40c6202.json @@ -0,0 +1,8 @@ +{ + "id": "6df6c4cb-3443-4e59-901c-7eb2a40c6202", + "type": "feature", + "description": "Amazon Managed Blockchain (AMB) Query provides serverless access to standardized, multi-blockchain datasets with developer-friendly APIs.", + "modules": [ + "service/managedblockchainquery" + ] +} \ No newline at end of 
file diff --git a/.changelog/900bd96bc46e48d49feed47ad876ea0d.json b/.changelog/900bd96bc46e48d49feed47ad876ea0d.json new file mode 100644 index 00000000000..9b84bbd313e --- /dev/null +++ b/.changelog/900bd96bc46e48d49feed47ad876ea0d.json @@ -0,0 +1,8 @@ +{ + "id": "900bd96b-c46e-48d4-9fee-d47ad876ea0d", + "type": "documentation", + "description": "The service is renaming as a part of AWS Health.", + "modules": [ + "service/omics" + ] +} \ No newline at end of file diff --git a/.changelog/9dd4c9ff909a403895bb0de496d22483.json b/.changelog/9dd4c9ff909a403895bb0de496d22483.json new file mode 100644 index 00000000000..d07f1aaf874 --- /dev/null +++ b/.changelog/9dd4c9ff909a403895bb0de496d22483.json @@ -0,0 +1,8 @@ +{ + "id": "9dd4c9ff-909a-4038-95bb-0de496d22483", + "type": "documentation", + "description": "Updating the HealthLake service documentation.", + "modules": [ + "service/healthlake" + ] +} \ No newline at end of file diff --git a/.changelog/b2ab71110f9a46f0ae96068b2c66a909.json b/.changelog/b2ab71110f9a46f0ae96068b2c66a909.json new file mode 100644 index 00000000000..7840eabd931 --- /dev/null +++ b/.changelog/b2ab71110f9a46f0ae96068b2c66a909.json @@ -0,0 +1,8 @@ +{ + "id": "b2ab7111-0f9a-46f0-ae96-068b2c66a909", + "type": "feature", + "description": "Release Glue Studio Snowflake Connector Node for SDK/CLI", + "modules": [ + "service/glue" + ] +} \ No newline at end of file diff --git a/.changelog/b537348e67ce4e21acb4201d99b0bdf5.json b/.changelog/b537348e67ce4e21acb4201d99b0bdf5.json new file mode 100644 index 00000000000..3faeeb81386 --- /dev/null +++ b/.changelog/b537348e67ce4e21acb4201d99b0bdf5.json @@ -0,0 +1,8 @@ +{ + "id": "b537348e-67ce-4e21-acb4-201d99b0bdf5", + "type": "documentation", + "description": "Updates the documentation for CreateResource.", + "modules": [ + "service/cloudcontrol" + ] +} \ No newline at end of file diff --git a/service/entityresolution/LICENSE.txt b/service/entityresolution/LICENSE.txt new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/service/entityresolution/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/service/entityresolution/api_client.go b/service/entityresolution/api_client.go new file mode 100644 index 00000000000..9fe28cd2091 --- /dev/null +++ b/service/entityresolution/api_client.go @@ -0,0 +1,445 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package entityresolution + +import ( + "context" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/defaults" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/retry" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" + internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" + smithy "github.com/aws/smithy-go" + smithydocument "github.com/aws/smithy-go/document" + "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "net" + "net/http" + "time" +) + +const ServiceID = "EntityResolution" +const ServiceAPIVersion = "2018-05-10" + +// Client provides the API client to make operations call for AWS EntityResolution. +type Client struct { + options Options +} + +// New returns an initialized Client based on the functional options. Provide +// additional functional options to further configure the behavior of the client, +// such as changing the client's endpoint or adding custom middleware behavior. +func New(options Options, optFns ...func(*Options)) *Client { + options = options.Copy() + + resolveDefaultLogger(&options) + + setResolvedDefaultsMode(&options) + + resolveRetryer(&options) + + resolveHTTPClient(&options) + + resolveHTTPSignerV4(&options) + + resolveDefaultEndpointConfiguration(&options) + + for _, fn := range optFns { + fn(&options) + } + + client := &Client{ + options: options, + } + + return client +} + +type Options struct { + // Set of options to modify how an operation is invoked. These apply to all + // operations invoked for this client. Use functional options on operation call to + // modify this list for per operation behavior. + APIOptions []func(*middleware.Stack) error + + // The optional application specific identifier appended to the User-Agent header. + AppID string + + // Configures the events that will be sent to the configured logger. + ClientLogMode aws.ClientLogMode + + // The credentials object to use when signing requests. + Credentials aws.CredentialsProvider + + // The configuration DefaultsMode that the SDK should use when constructing the + // clients initial default settings. + DefaultsMode aws.DefaultsMode + + // The endpoint options to be used when attempting to resolve an endpoint. + EndpointOptions EndpointResolverOptions + + // The service endpoint resolver. + EndpointResolver EndpointResolver + + // Signature Version 4 (SigV4) Signer + HTTPSignerV4 HTTPSignerV4 + + // The logger writer interface to write logging messages to. + Logger logging.Logger + + // The region to send requests to. (Required) + Region string + + // RetryMaxAttempts specifies the maximum number attempts an API client will call + // an operation that fails with a retryable error. A value of 0 is ignored, and + // will not be used to configure the API client created default retryer, or modify + // per operation call's retry max attempts. When creating a new API Clients this + // member will only be used if the Retryer Options member is nil. This value will + // be ignored if Retryer is not nil. If specified in an operation call's functional + // options with a value that is different than the constructed client's Options, + // the Client's Retryer will be wrapped to use the operation's specific + // RetryMaxAttempts value. 
+ RetryMaxAttempts int + + // RetryMode specifies the retry mode the API client will be created with, if + // Retryer option is not also specified. When creating a new API Clients this + // member will only be used if the Retryer Options member is nil. This value will + // be ignored if Retryer is not nil. Currently does not support per operation call + // overrides, may in the future. + RetryMode aws.RetryMode + + // Retryer guides how HTTP requests should be retried in case of recoverable + // failures. When nil the API client will use a default retryer. The kind of + // default retry created by the API client can be changed with the RetryMode + // option. + Retryer aws.Retryer + + // The RuntimeEnvironment configuration, only populated if the DefaultsMode is set + // to DefaultsModeAuto and is initialized using config.LoadDefaultConfig . You + // should not populate this structure programmatically, or rely on the values here + // within your applications. + RuntimeEnvironment aws.RuntimeEnvironment + + // The initial DefaultsMode used when the client options were constructed. If the + // DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved + // value was at that point in time. Currently does not support per operation call + // overrides, may in the future. + resolvedDefaultsMode aws.DefaultsMode + + // The HTTP client to invoke API calls with. Defaults to client's default HTTP + // implementation if nil. + HTTPClient HTTPClient +} + +// WithAPIOptions returns a functional option for setting the Client's APIOptions +// option. +func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) { + return func(o *Options) { + o.APIOptions = append(o.APIOptions, optFns...) + } +} + +// WithEndpointResolver returns a functional option for setting the Client's +// EndpointResolver option. +func WithEndpointResolver(v EndpointResolver) func(*Options) { + return func(o *Options) { + o.EndpointResolver = v + } +} + +type HTTPClient interface { + Do(*http.Request) (*http.Response, error) +} + +// Copy creates a clone where the APIOptions list is deep copied. 
+func (o Options) Copy() Options { + to := o + to.APIOptions = make([]func(*middleware.Stack) error, len(o.APIOptions)) + copy(to.APIOptions, o.APIOptions) + + return to +} +func (c *Client) invokeOperation(ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error) (result interface{}, metadata middleware.Metadata, err error) { + ctx = middleware.ClearStackValues(ctx) + stack := middleware.NewStack(opID, smithyhttp.NewStackRequest) + options := c.options.Copy() + for _, fn := range optFns { + fn(&options) + } + + finalizeRetryMaxAttemptOptions(&options, *c) + + finalizeClientEndpointResolverOptions(&options) + + for _, fn := range stackFns { + if err := fn(stack, options); err != nil { + return nil, metadata, err + } + } + + for _, fn := range options.APIOptions { + if err := fn(stack); err != nil { + return nil, metadata, err + } + } + + handler := middleware.DecorateHandler(smithyhttp.NewClientHandler(options.HTTPClient), stack) + result, metadata, err = handler.Handle(ctx, params) + if err != nil { + err = &smithy.OperationError{ + ServiceID: ServiceID, + OperationName: opID, + Err: err, + } + } + return result, metadata, err +} + +type noSmithyDocumentSerde = smithydocument.NoSerde + +func resolveDefaultLogger(o *Options) { + if o.Logger != nil { + return + } + o.Logger = logging.Nop{} +} + +func addSetLoggerMiddleware(stack *middleware.Stack, o Options) error { + return middleware.AddSetLoggerMiddleware(stack, o.Logger) +} + +func setResolvedDefaultsMode(o *Options) { + if len(o.resolvedDefaultsMode) > 0 { + return + } + + var mode aws.DefaultsMode + mode.SetFromString(string(o.DefaultsMode)) + + if mode == aws.DefaultsModeAuto { + mode = defaults.ResolveDefaultsModeAuto(o.Region, o.RuntimeEnvironment) + } + + o.resolvedDefaultsMode = mode +} + +// NewFromConfig returns a new client from the provided config. +func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client { + opts := Options{ + Region: cfg.Region, + DefaultsMode: cfg.DefaultsMode, + RuntimeEnvironment: cfg.RuntimeEnvironment, + HTTPClient: cfg.HTTPClient, + Credentials: cfg.Credentials, + APIOptions: cfg.APIOptions, + Logger: cfg.Logger, + ClientLogMode: cfg.ClientLogMode, + AppID: cfg.AppID, + } + resolveAWSRetryerProvider(cfg, &opts) + resolveAWSRetryMaxAttempts(cfg, &opts) + resolveAWSRetryMode(cfg, &opts) + resolveAWSEndpointResolver(cfg, &opts) + resolveUseDualStackEndpoint(cfg, &opts) + resolveUseFIPSEndpoint(cfg, &opts) + return New(opts, optFns...) 
+} + +func resolveHTTPClient(o *Options) { + var buildable *awshttp.BuildableClient + + if o.HTTPClient != nil { + var ok bool + buildable, ok = o.HTTPClient.(*awshttp.BuildableClient) + if !ok { + return + } + } else { + buildable = awshttp.NewBuildableClient() + } + + modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode) + if err == nil { + buildable = buildable.WithDialerOptions(func(dialer *net.Dialer) { + if dialerTimeout, ok := modeConfig.GetConnectTimeout(); ok { + dialer.Timeout = dialerTimeout + } + }) + + buildable = buildable.WithTransportOptions(func(transport *http.Transport) { + if tlsHandshakeTimeout, ok := modeConfig.GetTLSNegotiationTimeout(); ok { + transport.TLSHandshakeTimeout = tlsHandshakeTimeout + } + }) + } + + o.HTTPClient = buildable +} + +func resolveRetryer(o *Options) { + if o.Retryer != nil { + return + } + + if len(o.RetryMode) == 0 { + modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode) + if err == nil { + o.RetryMode = modeConfig.RetryMode + } + } + if len(o.RetryMode) == 0 { + o.RetryMode = aws.RetryModeStandard + } + + var standardOptions []func(*retry.StandardOptions) + if v := o.RetryMaxAttempts; v != 0 { + standardOptions = append(standardOptions, func(so *retry.StandardOptions) { + so.MaxAttempts = v + }) + } + + switch o.RetryMode { + case aws.RetryModeAdaptive: + var adaptiveOptions []func(*retry.AdaptiveModeOptions) + if len(standardOptions) != 0 { + adaptiveOptions = append(adaptiveOptions, func(ao *retry.AdaptiveModeOptions) { + ao.StandardOptions = append(ao.StandardOptions, standardOptions...) + }) + } + o.Retryer = retry.NewAdaptiveMode(adaptiveOptions...) + + default: + o.Retryer = retry.NewStandard(standardOptions...) + } +} + +func resolveAWSRetryerProvider(cfg aws.Config, o *Options) { + if cfg.Retryer == nil { + return + } + o.Retryer = cfg.Retryer() +} + +func resolveAWSRetryMode(cfg aws.Config, o *Options) { + if len(cfg.RetryMode) == 0 { + return + } + o.RetryMode = cfg.RetryMode +} +func resolveAWSRetryMaxAttempts(cfg aws.Config, o *Options) { + if cfg.RetryMaxAttempts == 0 { + return + } + o.RetryMaxAttempts = cfg.RetryMaxAttempts +} + +func finalizeRetryMaxAttemptOptions(o *Options, client Client) { + if v := o.RetryMaxAttempts; v == 0 || v == client.options.RetryMaxAttempts { + return + } + + o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts) +} + +func resolveAWSEndpointResolver(cfg aws.Config, o *Options) { + if cfg.EndpointResolver == nil && cfg.EndpointResolverWithOptions == nil { + return + } + o.EndpointResolver = withEndpointResolver(cfg.EndpointResolver, cfg.EndpointResolverWithOptions, NewDefaultEndpointResolver()) +} + +func addClientUserAgent(stack *middleware.Stack, options Options) error { + if err := awsmiddleware.AddSDKAgentKeyValue(awsmiddleware.APIMetadata, "entityresolution", goModuleVersion)(stack); err != nil { + return err + } + + if len(options.AppID) > 0 { + return awsmiddleware.AddSDKAgentKey(awsmiddleware.ApplicationIdentifier, options.AppID)(stack) + } + + return nil +} + +func addHTTPSignerV4Middleware(stack *middleware.Stack, o Options) error { + mw := v4.NewSignHTTPRequestMiddleware(v4.SignHTTPRequestMiddlewareOptions{ + CredentialsProvider: o.Credentials, + Signer: o.HTTPSignerV4, + LogSigning: o.ClientLogMode.IsSigning(), + }) + return stack.Finalize.Add(mw, middleware.After) +} + +type HTTPSignerV4 interface { + SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, 
signingTime time.Time, optFns ...func(*v4.SignerOptions)) error +} + +func resolveHTTPSignerV4(o *Options) { + if o.HTTPSignerV4 != nil { + return + } + o.HTTPSignerV4 = newDefaultV4Signer(*o) +} + +func newDefaultV4Signer(o Options) *v4.Signer { + return v4.NewSigner(func(so *v4.SignerOptions) { + so.Logger = o.Logger + so.LogSigning = o.ClientLogMode.IsSigning() + }) +} + +func addRetryMiddlewares(stack *middleware.Stack, o Options) error { + mo := retry.AddRetryMiddlewaresOptions{ + Retryer: o.Retryer, + LogRetryAttempts: o.ClientLogMode.IsRetries(), + } + return retry.AddRetryMiddlewares(stack, mo) +} + +// resolves dual-stack endpoint configuration +func resolveUseDualStackEndpoint(cfg aws.Config, o *Options) error { + if len(cfg.ConfigSources) == 0 { + return nil + } + value, found, err := internalConfig.ResolveUseDualStackEndpoint(context.Background(), cfg.ConfigSources) + if err != nil { + return err + } + if found { + o.EndpointOptions.UseDualStackEndpoint = value + } + return nil +} + +// resolves FIPS endpoint configuration +func resolveUseFIPSEndpoint(cfg aws.Config, o *Options) error { + if len(cfg.ConfigSources) == 0 { + return nil + } + value, found, err := internalConfig.ResolveUseFIPSEndpoint(context.Background(), cfg.ConfigSources) + if err != nil { + return err + } + if found { + o.EndpointOptions.UseFIPSEndpoint = value + } + return nil +} + +func addRequestIDRetrieverMiddleware(stack *middleware.Stack) error { + return awsmiddleware.AddRequestIDRetrieverMiddleware(stack) +} + +func addResponseErrorMiddleware(stack *middleware.Stack) error { + return awshttp.AddResponseErrorMiddleware(stack) +} + +func addRequestResponseLogging(stack *middleware.Stack, o Options) error { + return stack.Deserialize.Add(&smithyhttp.RequestResponseLogger{ + LogRequest: o.ClientLogMode.IsRequest(), + LogRequestWithBody: o.ClientLogMode.IsRequestWithBody(), + LogResponse: o.ClientLogMode.IsResponse(), + LogResponseWithBody: o.ClientLogMode.IsResponseWithBody(), + }, middleware.After) +} diff --git a/service/entityresolution/api_client_test.go b/service/entityresolution/api_client_test.go new file mode 100644 index 00000000000..7433efd5a97 --- /dev/null +++ b/service/entityresolution/api_client_test.go @@ -0,0 +1,123 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
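// --- Usage sketch (editor's addition; not part of the generated diff) ---
// api_client.go above resolves defaults for the logger, defaults mode,
// retryer, HTTP client, SigV4 signer, and endpoint. A caller would normally
// build the client from shared config; the region and retry values below are
// illustrative assumptions:
//
//	import (
//		"context"
//
//		"github.com/aws/aws-sdk-go-v2/aws"
//		"github.com/aws/aws-sdk-go-v2/config"
//		"github.com/aws/aws-sdk-go-v2/service/entityresolution"
//	)
//
//	cfg, err := config.LoadDefaultConfig(context.Background())
//	if err != nil {
//		// handle error
//	}
//	client := entityresolution.NewFromConfig(cfg, func(o *entityresolution.Options) {
//		o.Region = "us-east-1"              // assumed region
//		o.RetryMode = aws.RetryModeAdaptive // honored by resolveRetryer above
//		o.RetryMaxAttempts = 5              // 0 would leave the default
//	})
//	_ = client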
+ +package entityresolution + +import ( + "context" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "io/ioutil" + "net/http" + "strings" + "testing" +) + +func TestClient_resolveRetryOptions(t *testing.T) { + nopClient := smithyhttp.ClientDoFunc(func(_ *http.Request) (*http.Response, error) { + return &http.Response{ + StatusCode: 200, + Header: http.Header{}, + Body: ioutil.NopCloser(strings.NewReader("")), + }, nil + }) + + cases := map[string]struct { + defaultsMode aws.DefaultsMode + retryer aws.Retryer + retryMaxAttempts int + opRetryMaxAttempts *int + retryMode aws.RetryMode + expectClientRetryMode aws.RetryMode + expectClientMaxAttempts int + expectOpMaxAttempts int + }{ + "defaults": { + defaultsMode: aws.DefaultsModeStandard, + expectClientRetryMode: aws.RetryModeStandard, + expectClientMaxAttempts: 3, + expectOpMaxAttempts: 3, + }, + "custom default retry": { + retryMode: aws.RetryModeAdaptive, + retryMaxAttempts: 10, + expectClientRetryMode: aws.RetryModeAdaptive, + expectClientMaxAttempts: 10, + expectOpMaxAttempts: 10, + }, + "custom op max attempts": { + retryMode: aws.RetryModeAdaptive, + retryMaxAttempts: 10, + opRetryMaxAttempts: aws.Int(2), + expectClientRetryMode: aws.RetryModeAdaptive, + expectClientMaxAttempts: 10, + expectOpMaxAttempts: 2, + }, + "custom op no change max attempts": { + retryMode: aws.RetryModeAdaptive, + retryMaxAttempts: 10, + opRetryMaxAttempts: aws.Int(10), + expectClientRetryMode: aws.RetryModeAdaptive, + expectClientMaxAttempts: 10, + expectOpMaxAttempts: 10, + }, + "custom op 0 max attempts": { + retryMode: aws.RetryModeAdaptive, + retryMaxAttempts: 10, + opRetryMaxAttempts: aws.Int(0), + expectClientRetryMode: aws.RetryModeAdaptive, + expectClientMaxAttempts: 10, + expectOpMaxAttempts: 10, + }, + } + + for name, c := range cases { + t.Run(name, func(t *testing.T) { + client := NewFromConfig(aws.Config{ + DefaultsMode: c.defaultsMode, + Retryer: func() func() aws.Retryer { + if c.retryer == nil { + return nil + } + + return func() aws.Retryer { return c.retryer } + }(), + HTTPClient: nopClient, + RetryMaxAttempts: c.retryMaxAttempts, + RetryMode: c.retryMode, + }) + + if e, a := c.expectClientRetryMode, client.options.RetryMode; e != a { + t.Errorf("expect %v retry mode, got %v", e, a) + } + if e, a := c.expectClientMaxAttempts, client.options.Retryer.MaxAttempts(); e != a { + t.Errorf("expect %v max attempts, got %v", e, a) + } + + _, _, err := client.invokeOperation(context.Background(), "mockOperation", struct{}{}, + []func(*Options){ + func(o *Options) { + if c.opRetryMaxAttempts == nil { + return + } + o.RetryMaxAttempts = *c.opRetryMaxAttempts + }, + }, + func(s *middleware.Stack, o Options) error { + s.Initialize.Clear() + s.Serialize.Clear() + s.Build.Clear() + s.Finalize.Clear() + s.Deserialize.Clear() + + if e, a := c.expectOpMaxAttempts, o.Retryer.MaxAttempts(); e != a { + t.Errorf("expect %v op max attempts, got %v", e, a) + } + return nil + }) + if err != nil { + t.Fatalf("expect no operation error, got %v", err) + } + }) + } +} diff --git a/service/entityresolution/api_op_CreateMatchingWorkflow.go b/service/entityresolution/api_op_CreateMatchingWorkflow.go new file mode 100644 index 00000000000..63af845118b --- /dev/null +++ b/service/entityresolution/api_op_CreateMatchingWorkflow.go @@ -0,0 +1,199 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
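// --- Usage sketch (editor's addition; not part of the generated diff) ---
// The test above checks that a per-operation RetryMaxAttempts override wraps
// the client retryer for that call only (finalizeRetryMaxAttemptOptions wraps
// it via retry.AddWithMaxAttempts). A caller expresses the same override with
// an operation's functional options; the workflow name is hypothetical:
//
//	out, err := client.GetMatchingWorkflow(ctx,
//		&entityresolution.GetMatchingWorkflowInput{
//			WorkflowName: aws.String("crm-campaign-matching"),
//		},
//		func(o *entityresolution.Options) {
//			o.RetryMaxAttempts = 2 // this call only; a value of 0 is ignored
//		})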
+ +package entityresolution + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/entityresolution/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Creates a MatchingWorkflow object which stores the configuration of the data +// processing job to be run. It is important to note that there should not be a +// pre-existing MatchingWorkflow with the same name. To modify an existing +// workflow, utilize the UpdateMatchingWorkflow API. +func (c *Client) CreateMatchingWorkflow(ctx context.Context, params *CreateMatchingWorkflowInput, optFns ...func(*Options)) (*CreateMatchingWorkflowOutput, error) { + if params == nil { + params = &CreateMatchingWorkflowInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "CreateMatchingWorkflow", params, optFns, c.addOperationCreateMatchingWorkflowMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*CreateMatchingWorkflowOutput) + out.ResultMetadata = metadata + return out, nil +} + +type CreateMatchingWorkflowInput struct { + + // A list of InputSource objects, which have the fields InputSourceARN and + // SchemaName . + // + // This member is required. + InputSourceConfig []types.InputSource + + // A list of OutputSource objects, each of which contains fields OutputS3Path , + // ApplyNormalization , and Output . + // + // This member is required. + OutputSourceConfig []types.OutputSource + + // An object which defines the resolutionType and the ruleBasedProperties + // + // This member is required. + ResolutionTechniques *types.ResolutionTechniques + + // The Amazon Resource Name (ARN) of the IAM role. AWS Entity Resolution assumes + // this role to create resources on your behalf as part of workflow execution. + // + // This member is required. + RoleArn *string + + // The name of the workflow. There cannot be multiple DataIntegrationWorkflows + // with the same name. + // + // This member is required. + WorkflowName *string + + // A description of the workflow. + Description *string + + // An object which defines an incremental run type and has only incrementalRunType + // as a field. + IncrementalRunConfig *types.IncrementalRunConfig + + // The tags used to organize, track, or control access for this resource. + Tags map[string]string + + noSmithyDocumentSerde +} + +type CreateMatchingWorkflowOutput struct { + + // A list of InputSource objects, which have the fields InputSourceARN and + // SchemaName . + // + // This member is required. + InputSourceConfig []types.InputSource + + // A list of OutputSource objects, each of which contains fields OutputS3Path , + // ApplyNormalization , and Output . + // + // This member is required. + OutputSourceConfig []types.OutputSource + + // An object which defines the resolutionType and the ruleBasedProperties + // + // This member is required. + ResolutionTechniques *types.ResolutionTechniques + + // The Amazon Resource Name (ARN) of the IAM role. AWS Entity Resolution assumes + // this role to create resources on your behalf as part of workflow execution. + // + // This member is required. + RoleArn *string + + // The ARN (Amazon Resource Name) that Entity Resolution generated for the + // MatchingWorkflow . + // + // This member is required. + WorkflowArn *string + + // The name of the workflow. + // + // This member is required. + WorkflowName *string + + // A description of the workflow. 
+ Description *string + + // An object which defines an incremental run type and has only incrementalRunType + // as a field. + IncrementalRunConfig *types.IncrementalRunConfig + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationCreateMatchingWorkflowMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestjson1_serializeOpCreateMatchingWorkflow{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpCreateMatchingWorkflow{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpCreateMatchingWorkflowValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateMatchingWorkflow(options.Region), middleware.Before); err != nil { + return err + } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opCreateMatchingWorkflow(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "entityresolution", + OperationName: "CreateMatchingWorkflow", + } +} diff --git a/service/entityresolution/api_op_CreateSchemaMapping.go b/service/entityresolution/api_op_CreateSchemaMapping.go new file mode 100644 index 00000000000..2d22487cd3a --- /dev/null +++ b/service/entityresolution/api_op_CreateSchemaMapping.go @@ -0,0 +1,159 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package entityresolution + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/entityresolution/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Creates a schema mapping, which defines the schema of the input customer +// records table. The SchemaMapping also provides Entity Resolution with some +// metadata about the table, such as the attribute types of the columns and which +// columns to match on. 
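// --- Usage sketch (editor's addition; not part of the generated diff) ---
// Calling the CreateMatchingWorkflow operation above. Only the top-level
// input fields are confirmed by the struct definition; the ARN, names, and
// the shapes of the types package values are assumptions, with unconfirmed
// fields elided:
//
//	out, err := client.CreateMatchingWorkflow(ctx, &entityresolution.CreateMatchingWorkflowInput{
//		WorkflowName:         aws.String("crm-campaign-matching"),                            // hypothetical; must not already exist
//		RoleArn:              aws.String("arn:aws:iam::111122223333:role/EntityResolutionRole"), // hypothetical
//		InputSourceConfig:    []types.InputSource{ /* InputSourceARN, SchemaName */ },
//		OutputSourceConfig:   []types.OutputSource{ /* OutputS3Path, ApplyNormalization, Output */ },
//		ResolutionTechniques: &types.ResolutionTechniques{ /* resolutionType, ruleBasedProperties */ },
//	})
//	if err != nil {
//		// handle error; use UpdateMatchingWorkflow to modify an existing workflow
//	}
//	_ = out.WorkflowArn // ARN generated by Entity Resolution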
+func (c *Client) CreateSchemaMapping(ctx context.Context, params *CreateSchemaMappingInput, optFns ...func(*Options)) (*CreateSchemaMappingOutput, error) { + if params == nil { + params = &CreateSchemaMappingInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "CreateSchemaMapping", params, optFns, c.addOperationCreateSchemaMappingMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*CreateSchemaMappingOutput) + out.ResultMetadata = metadata + return out, nil +} + +type CreateSchemaMappingInput struct { + + // The name of the schema. There cannot be multiple SchemaMappings with the same + // name. + // + // This member is required. + SchemaName *string + + // A description of the schema. + Description *string + + // A list of MappedInputFields . Each MappedInputField corresponds to a column in + // the source data table, and contains the column name plus additional information + // that Entity Resolution uses for matching. + MappedInputFields []types.SchemaInputAttribute + + // The tags used to organize, track, or control access for this resource. + Tags map[string]string + + noSmithyDocumentSerde +} + +type CreateSchemaMappingOutput struct { + + // A description of the schema. + // + // This member is required. + Description *string + + // A list of MappedInputFields . Each MappedInputField corresponds to a column in + // the source data table, and contains the column name plus additional information + // that Entity Resolution uses for matching. + // + // This member is required. + MappedInputFields []types.SchemaInputAttribute + + // The ARN (Amazon Resource Name) that Entity Resolution generated for the + // SchemaMapping . + // + // This member is required. + SchemaArn *string + + // The name of the schema. + // + // This member is required. + SchemaName *string + + // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationCreateSchemaMappingMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestjson1_serializeOpCreateSchemaMapping{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpCreateSchemaMapping{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpCreateSchemaMappingValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateSchemaMapping(options.Region), middleware.Before); err != nil { + return err + } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opCreateSchemaMapping(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "entityresolution", + OperationName: "CreateSchemaMapping", + } +} diff --git a/service/entityresolution/api_op_DeleteMatchingWorkflow.go b/service/entityresolution/api_op_DeleteMatchingWorkflow.go new file mode 100644 index 00000000000..10b56f92c5a --- /dev/null +++ b/service/entityresolution/api_op_DeleteMatchingWorkflow.go @@ -0,0 +1,126 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package entityresolution + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Deletes the MatchingWorkflow with a given name. This operation will succeed +// even if a workflow with the given name does not exist. 
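// --- Usage sketch (editor's addition; not part of the generated diff) ---
// Calling the CreateSchemaMapping operation above. The attribute fields of
// types.SchemaInputAttribute are not shown in this diff, so the entries are
// elided; the schema name is hypothetical and must be unique:
//
//	out, err := client.CreateSchemaMapping(ctx, &entityresolution.CreateSchemaMappingInput{
//		SchemaName:        aws.String("crm-schema"),
//		Description:       aws.String("Columns of the CRM source table"),
//		MappedInputFields: []types.SchemaInputAttribute{ /* one entry per source column */ },
//	})
//	if err != nil {
//		// handle error
//	}
//	_ = out.SchemaArn // ARN generated by Entity Resolution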
+func (c *Client) DeleteMatchingWorkflow(ctx context.Context, params *DeleteMatchingWorkflowInput, optFns ...func(*Options)) (*DeleteMatchingWorkflowOutput, error) { + if params == nil { + params = &DeleteMatchingWorkflowInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteMatchingWorkflow", params, optFns, c.addOperationDeleteMatchingWorkflowMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteMatchingWorkflowOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteMatchingWorkflowInput struct { + + // The name of the workflow to be deleted. + // + // This member is required. + WorkflowName *string + + noSmithyDocumentSerde +} + +type DeleteMatchingWorkflowOutput struct { + + // A successful operation message. + // + // This member is required. + Message *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteMatchingWorkflowMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestjson1_serializeOpDeleteMatchingWorkflow{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpDeleteMatchingWorkflow{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpDeleteMatchingWorkflowValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteMatchingWorkflow(options.Region), middleware.Before); err != nil { + return err + } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDeleteMatchingWorkflow(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "entityresolution", + OperationName: "DeleteMatchingWorkflow", + } +} diff --git a/service/entityresolution/api_op_DeleteSchemaMapping.go b/service/entityresolution/api_op_DeleteSchemaMapping.go new file mode 100644 index 00000000000..0c36627b6f6 --- /dev/null +++ b/service/entityresolution/api_op_DeleteSchemaMapping.go @@ -0,0 +1,128 @@ +// Code
generated by smithy-go-codegen DO NOT EDIT. + +package entityresolution + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Deletes the SchemaMapping with a given name. This operation will succeed even +// if a schema with the given name does not exist. This operation will fail if +// there is a DataIntegrationWorkflow object that references the SchemaMapping in +// the workflow's InputSourceConfig . +func (c *Client) DeleteSchemaMapping(ctx context.Context, params *DeleteSchemaMappingInput, optFns ...func(*Options)) (*DeleteSchemaMappingOutput, error) { + if params == nil { + params = &DeleteSchemaMappingInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteSchemaMapping", params, optFns, c.addOperationDeleteSchemaMappingMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteSchemaMappingOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteSchemaMappingInput struct { + + // The name of the schema to delete. + // + // This member is required. + SchemaName *string + + noSmithyDocumentSerde +} + +type DeleteSchemaMappingOutput struct { + + // A successful operation message. + // + // This member is required. + Message *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteSchemaMappingMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestjson1_serializeOpDeleteSchemaMapping{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpDeleteSchemaMapping{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpDeleteSchemaMappingValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteSchemaMapping(options.Region), middleware.Before); err != nil { + return err + } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func 
newServiceMetadataMiddleware_opDeleteSchemaMapping(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "entityresolution", + OperationName: "DeleteSchemaMapping", + } +} diff --git a/service/entityresolution/api_op_GetMatchId.go b/service/entityresolution/api_op_GetMatchId.go new file mode 100644 index 00000000000..e7150a1a528 --- /dev/null +++ b/service/entityresolution/api_op_GetMatchId.go @@ -0,0 +1,129 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package entityresolution + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns the corresponding Match ID of a customer record if the record has been +// processed. +func (c *Client) GetMatchId(ctx context.Context, params *GetMatchIdInput, optFns ...func(*Options)) (*GetMatchIdOutput, error) { + if params == nil { + params = &GetMatchIdInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetMatchId", params, optFns, c.addOperationGetMatchIdMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetMatchIdOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetMatchIdInput struct { + + // The record to fetch the Match ID for. + // + // This member is required. + Record map[string]string + + // The name of the workflow. + // + // This member is required. + WorkflowName *string + + noSmithyDocumentSerde +} + +type GetMatchIdOutput struct { + + // The unique identifiers for this group of match records. + MatchId *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetMatchIdMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestjson1_serializeOpGetMatchId{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpGetMatchId{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpGetMatchIdValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetMatchId(options.Region), middleware.Before); err != nil { + return err + } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetMatchId(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "entityresolution", + OperationName: "GetMatchId", + } +} diff --git a/service/entityresolution/api_op_GetMatchingJob.go b/service/entityresolution/api_op_GetMatchingJob.go new file mode 100644 index 00000000000..6730ad3be85 --- /dev/null +++ b/service/entityresolution/api_op_GetMatchingJob.go @@ -0,0 +1,153 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package entityresolution + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/entityresolution/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "time" +) + +// Gets the status, metrics, and errors (if there are any) that are associated +// with a job. 
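// --- Usage sketch (editor's addition; not part of the generated diff) ---
// Calling the GetMatchId operation above. Record keys must correspond to the
// workflow's schema mapping; the key and values here are hypothetical:
//
//	out, err := client.GetMatchId(ctx, &entityresolution.GetMatchIdInput{
//		WorkflowName: aws.String("crm-campaign-matching"),
//		Record:       map[string]string{"email": "jane@example.com"},
//	})
//	if err != nil {
//		// handle error
//	}
//	if out.MatchId == nil {
//		// the record has not been processed by a matching job yet
//	}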
+func (c *Client) GetMatchingJob(ctx context.Context, params *GetMatchingJobInput, optFns ...func(*Options)) (*GetMatchingJobOutput, error) { + if params == nil { + params = &GetMatchingJobInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetMatchingJob", params, optFns, c.addOperationGetMatchingJobMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetMatchingJobOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetMatchingJobInput struct { + + // The ID of the job. + // + // This member is required. + JobId *string + + // The name of the workflow. + // + // This member is required. + WorkflowName *string + + noSmithyDocumentSerde +} + +type GetMatchingJobOutput struct { + + // The ID of the job. + // + // This member is required. + JobId *string + + // The time at which the job was started. + // + // This member is required. + StartTime *time.Time + + // The current status of the job. Either running , succeeded , queued , or failed . + // + // This member is required. + Status types.JobStatus + + // The time at which the job has finished. + EndTime *time.Time + + // An object containing an error message, if there was an error. + ErrorDetails *types.ErrorDetails + + // Metrics associated with the execution, specifically total records processed, + // unique IDs generated, and records the execution skipped. + Metrics *types.JobMetrics + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetMatchingJobMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestjson1_serializeOpGetMatchingJob{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpGetMatchingJob{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpGetMatchingJobValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetMatchingJob(options.Region), middleware.Before); err != nil { + return err + } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func 
newServiceMetadataMiddleware_opGetMatchingJob(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "entityresolution", + OperationName: "GetMatchingJob", + } +} diff --git a/service/entityresolution/api_op_GetMatchingWorkflow.go b/service/entityresolution/api_op_GetMatchingWorkflow.go new file mode 100644 index 00000000000..90a9c2f081b --- /dev/null +++ b/service/entityresolution/api_op_GetMatchingWorkflow.go @@ -0,0 +1,176 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package entityresolution + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/entityresolution/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "time" +) + +// Returns the MatchingWorkflow with a given name, if it exists. +func (c *Client) GetMatchingWorkflow(ctx context.Context, params *GetMatchingWorkflowInput, optFns ...func(*Options)) (*GetMatchingWorkflowOutput, error) { + if params == nil { + params = &GetMatchingWorkflowInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetMatchingWorkflow", params, optFns, c.addOperationGetMatchingWorkflowMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetMatchingWorkflowOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetMatchingWorkflowInput struct { + + // The name of the workflow. + // + // This member is required. + WorkflowName *string + + noSmithyDocumentSerde +} + +type GetMatchingWorkflowOutput struct { + + // The timestamp of when the workflow was created. + // + // This member is required. + CreatedAt *time.Time + + // A list of InputSource objects, which have the fields InputSourceARN and + // SchemaName . + // + // This member is required. + InputSourceConfig []types.InputSource + + // A list of OutputSource objects, each of which contains fields OutputS3Path , + // ApplyNormalization , and Output . + // + // This member is required. + OutputSourceConfig []types.OutputSource + + // An object which defines the resolutionType and the ruleBasedProperties + // + // This member is required. + ResolutionTechniques *types.ResolutionTechniques + + // The Amazon Resource Name (ARN) of the IAM role. AWS Entity Resolution assumes + // this role to access resources on your behalf. + // + // This member is required. + RoleArn *string + + // The timestamp of when the workflow was last updated. + // + // This member is required. + UpdatedAt *time.Time + + // The ARN (Amazon Resource Name) that Entity Resolution generated for the + // MatchingWorkflow . + // + // This member is required. + WorkflowArn *string + + // The name of the workflow. + // + // This member is required. + WorkflowName *string + + // A description of the workflow. + Description *string + + // An object which defines an incremental run type and has only incrementalRunType + // as a field. + IncrementalRunConfig *types.IncrementalRunConfig + + // The tags used to organize, track, or control access for this resource. + Tags map[string]string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetMatchingWorkflowMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestjson1_serializeOpGetMatchingWorkflow{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpGetMatchingWorkflow{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpGetMatchingWorkflowValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetMatchingWorkflow(options.Region), middleware.Before); err != nil { + return err + } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetMatchingWorkflow(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "entityresolution", + OperationName: "GetMatchingWorkflow", + } +} diff --git a/service/entityresolution/api_op_GetSchemaMapping.go b/service/entityresolution/api_op_GetSchemaMapping.go new file mode 100644 index 00000000000..305650aa8e9 --- /dev/null +++ b/service/entityresolution/api_op_GetSchemaMapping.go @@ -0,0 +1,156 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package entityresolution + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/entityresolution/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "time" +) + +// Returns the SchemaMapping of a given name. 
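+// +// A minimal usage sketch (illustrative only; client setup elided, schema name +// hypothetical), printing fields documented on GetSchemaMappingOutput below: +// +// out, err := client.GetSchemaMapping(ctx, &entityresolution.GetSchemaMappingInput{ +// SchemaName: aws.String("my-schema"), // hypothetical name +// }) +// if err != nil { +// log.Fatal(err) +// } +// fmt.Println(aws.ToString(out.SchemaArn), len(out.MappedInputFields))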
+func (c *Client) GetSchemaMapping(ctx context.Context, params *GetSchemaMappingInput, optFns ...func(*Options)) (*GetSchemaMappingOutput, error) { + if params == nil { + params = &GetSchemaMappingInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetSchemaMapping", params, optFns, c.addOperationGetSchemaMappingMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetSchemaMappingOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetSchemaMappingInput struct { + + // The name of the schema to be retrieved. + // + // This member is required. + SchemaName *string + + noSmithyDocumentSerde +} + +type GetSchemaMappingOutput struct { + + // The timestamp of when the SchemaMapping was created. + // + // This member is required. + CreatedAt *time.Time + + // A list of MappedInputFields . Each MappedInputField corresponds to a column in + // the source data table, and contains the column name plus additional information + // that Entity Resolution uses for matching. + // + // This member is required. + MappedInputFields []types.SchemaInputAttribute + + // The ARN (Amazon Resource Name) that Entity Resolution generated for the + // SchemaMapping. + // + // This member is required. + SchemaArn *string + + // The name of the schema. + // + // This member is required. + SchemaName *string + + // The timestamp of when the SchemaMapping was last updated. + // + // This member is required. + UpdatedAt *time.Time + + // A description of the schema. + Description *string + + // The tags used to organize, track, or control access for this resource. + Tags map[string]string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetSchemaMappingMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestjson1_serializeOpGetSchemaMapping{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpGetSchemaMapping{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpGetSchemaMappingValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetSchemaMapping(options.Region), middleware.Before); err != nil { + return err + } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if
err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetSchemaMapping(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "entityresolution", + OperationName: "GetSchemaMapping", + } +} diff --git a/service/entityresolution/api_op_ListMatchingJobs.go b/service/entityresolution/api_op_ListMatchingJobs.go new file mode 100644 index 00000000000..72924a41e4a --- /dev/null +++ b/service/entityresolution/api_op_ListMatchingJobs.go @@ -0,0 +1,225 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package entityresolution + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/entityresolution/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Lists all jobs for a given workflow. +func (c *Client) ListMatchingJobs(ctx context.Context, params *ListMatchingJobsInput, optFns ...func(*Options)) (*ListMatchingJobsOutput, error) { + if params == nil { + params = &ListMatchingJobsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListMatchingJobs", params, optFns, c.addOperationListMatchingJobsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListMatchingJobsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListMatchingJobsInput struct { + + // The name of the workflow whose jobs you want to list. + // + // This member is required. + WorkflowName *string + + // The maximum number of objects returned per page. + MaxResults *int32 + + // The pagination token from the previous ListMatchingJobs API call. + NextToken *string + + noSmithyDocumentSerde +} + +type ListMatchingJobsOutput struct { + + // A list of JobSummary objects, each of which contains the ID, status, start time, + // and end time of a job. + Jobs []types.JobSummary + + // The pagination token from the previous ListMatchingJobs API call. + NextToken *string + + // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListMatchingJobsMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestjson1_serializeOpListMatchingJobs{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpListMatchingJobs{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpListMatchingJobsValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListMatchingJobs(options.Region), middleware.Before); err != nil { + return err + } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +// ListMatchingJobsAPIClient is a client that implements the ListMatchingJobs +// operation. +type ListMatchingJobsAPIClient interface { + ListMatchingJobs(context.Context, *ListMatchingJobsInput, ...func(*Options)) (*ListMatchingJobsOutput, error) +} + +var _ ListMatchingJobsAPIClient = (*Client)(nil) + +// ListMatchingJobsPaginatorOptions is the paginator options for ListMatchingJobs +type ListMatchingJobsPaginatorOptions struct { + // The maximum number of objects returned per page. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. 
+ StopOnDuplicateToken bool +} + +// ListMatchingJobsPaginator is a paginator for ListMatchingJobs +type ListMatchingJobsPaginator struct { + options ListMatchingJobsPaginatorOptions + client ListMatchingJobsAPIClient + params *ListMatchingJobsInput + nextToken *string + firstPage bool +} + +// NewListMatchingJobsPaginator returns a new ListMatchingJobsPaginator +func NewListMatchingJobsPaginator(client ListMatchingJobsAPIClient, params *ListMatchingJobsInput, optFns ...func(*ListMatchingJobsPaginatorOptions)) *ListMatchingJobsPaginator { + if params == nil { + params = &ListMatchingJobsInput{} + } + + options := ListMatchingJobsPaginatorOptions{} + if params.MaxResults != nil { + options.Limit = *params.MaxResults + } + + for _, fn := range optFns { + fn(&options) + } + + return &ListMatchingJobsPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.NextToken, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *ListMatchingJobsPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) +} + +// NextPage retrieves the next ListMatchingJobs page. +func (p *ListMatchingJobsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListMatchingJobsOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.NextToken = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.MaxResults = limit + + result, err := p.client.ListMatchingJobs(ctx, ¶ms, optFns...) + if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.NextToken + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +func newServiceMetadataMiddleware_opListMatchingJobs(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "entityresolution", + OperationName: "ListMatchingJobs", + } +} diff --git a/service/entityresolution/api_op_ListMatchingWorkflows.go b/service/entityresolution/api_op_ListMatchingWorkflows.go new file mode 100644 index 00000000000..e143b358c76 --- /dev/null +++ b/service/entityresolution/api_op_ListMatchingWorkflows.go @@ -0,0 +1,219 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package entityresolution + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/entityresolution/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns a list of all the MatchingWorkflows that have been created for an AWS +// account. 
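+// +// A usage sketch for fetching every page via the paginator defined later in +// this file (illustrative only; client setup elided): +// +// p := entityresolution.NewListMatchingWorkflowsPaginator(client, &entityresolution.ListMatchingWorkflowsInput{}) +// for p.HasMorePages() { +// page, err := p.NextPage(ctx) +// if err != nil { +// log.Fatal(err) +// } +// for _, wf := range page.WorkflowSummaries { +// fmt.Println(aws.ToString(wf.WorkflowName)) +// } +// }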
+func (c *Client) ListMatchingWorkflows(ctx context.Context, params *ListMatchingWorkflowsInput, optFns ...func(*Options)) (*ListMatchingWorkflowsOutput, error) { + if params == nil { + params = &ListMatchingWorkflowsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListMatchingWorkflows", params, optFns, c.addOperationListMatchingWorkflowsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListMatchingWorkflowsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListMatchingWorkflowsInput struct { + + // The maximum number of objects returned per page. + MaxResults *int32 + + // The pagination token from the previous ListMatchingWorkflows API call. + NextToken *string + + noSmithyDocumentSerde +} + +type ListMatchingWorkflowsOutput struct { + + // The pagination token from the previous ListMatchingWorkflows API call. + NextToken *string + + // A list of MatchingWorkflowSummary objects, each of which contains the fields + // WorkflowName , WorkflowArn , CreatedAt , and UpdatedAt . + WorkflowSummaries []types.MatchingWorkflowSummary + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListMatchingWorkflowsMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestjson1_serializeOpListMatchingWorkflows{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpListMatchingWorkflows{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListMatchingWorkflows(options.Region), middleware.Before); err != nil { + return err + } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +// ListMatchingWorkflowsAPIClient is a client that implements the +// ListMatchingWorkflows operation.
+type ListMatchingWorkflowsAPIClient interface { + ListMatchingWorkflows(context.Context, *ListMatchingWorkflowsInput, ...func(*Options)) (*ListMatchingWorkflowsOutput, error) +} + +var _ ListMatchingWorkflowsAPIClient = (*Client)(nil) + +// ListMatchingWorkflowsPaginatorOptions is the paginator options for +// ListMatchingWorkflows +type ListMatchingWorkflowsPaginatorOptions struct { + // The maximum number of objects returned per page. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. + StopOnDuplicateToken bool +} + +// ListMatchingWorkflowsPaginator is a paginator for ListMatchingWorkflows +type ListMatchingWorkflowsPaginator struct { + options ListMatchingWorkflowsPaginatorOptions + client ListMatchingWorkflowsAPIClient + params *ListMatchingWorkflowsInput + nextToken *string + firstPage bool +} + +// NewListMatchingWorkflowsPaginator returns a new ListMatchingWorkflowsPaginator +func NewListMatchingWorkflowsPaginator(client ListMatchingWorkflowsAPIClient, params *ListMatchingWorkflowsInput, optFns ...func(*ListMatchingWorkflowsPaginatorOptions)) *ListMatchingWorkflowsPaginator { + if params == nil { + params = &ListMatchingWorkflowsInput{} + } + + options := ListMatchingWorkflowsPaginatorOptions{} + if params.MaxResults != nil { + options.Limit = *params.MaxResults + } + + for _, fn := range optFns { + fn(&options) + } + + return &ListMatchingWorkflowsPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.NextToken, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *ListMatchingWorkflowsPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) +} + +// NextPage retrieves the next ListMatchingWorkflows page. +func (p *ListMatchingWorkflowsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListMatchingWorkflowsOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.NextToken = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.MaxResults = limit + + result, err := p.client.ListMatchingWorkflows(ctx, ¶ms, optFns...) + if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.NextToken + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +func newServiceMetadataMiddleware_opListMatchingWorkflows(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "entityresolution", + OperationName: "ListMatchingWorkflows", + } +} diff --git a/service/entityresolution/api_op_ListSchemaMappings.go b/service/entityresolution/api_op_ListSchemaMappings.go new file mode 100644 index 00000000000..600c4cc40e4 --- /dev/null +++ b/service/entityresolution/api_op_ListSchemaMappings.go @@ -0,0 +1,219 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package entityresolution + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/entityresolution/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns a list of all the SchemaMappings that have been created for an AWS +// account. +func (c *Client) ListSchemaMappings(ctx context.Context, params *ListSchemaMappingsInput, optFns ...func(*Options)) (*ListSchemaMappingsOutput, error) { + if params == nil { + params = &ListSchemaMappingsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListSchemaMappings", params, optFns, c.addOperationListSchemaMappingsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListSchemaMappingsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListSchemaMappingsInput struct { + + // The maximum number of objects returned per page. + MaxResults *int32 + + // The pagination token from the previous ListSchemaMappings API call. + NextToken *string + + noSmithyDocumentSerde +} + +type ListSchemaMappingsOutput struct { + + // The pagination token from the previous ListSchemaMappings API call. + NextToken *string + + // A list of SchemaMappingSummary objects, each of which contains the fields + // SchemaName , SchemaArn , CreatedAt , and UpdatedAt . + SchemaList []types.SchemaMappingSummary + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListSchemaMappingsMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestjson1_serializeOpListSchemaMappings{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpListSchemaMappings{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListSchemaMappings(options.Region), middleware.Before); err != nil { + return err + } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +// ListSchemaMappingsAPIClient is a client that implements
the ListSchemaMappings +// operation. +type ListSchemaMappingsAPIClient interface { + ListSchemaMappings(context.Context, *ListSchemaMappingsInput, ...func(*Options)) (*ListSchemaMappingsOutput, error) +} + +var _ ListSchemaMappingsAPIClient = (*Client)(nil) + +// ListSchemaMappingsPaginatorOptions is the paginator options for +// ListSchemaMappings +type ListSchemaMappingsPaginatorOptions struct { + // The maximum number of objects returned per page. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. + StopOnDuplicateToken bool +} + +// ListSchemaMappingsPaginator is a paginator for ListSchemaMappings +type ListSchemaMappingsPaginator struct { + options ListSchemaMappingsPaginatorOptions + client ListSchemaMappingsAPIClient + params *ListSchemaMappingsInput + nextToken *string + firstPage bool +} + +// NewListSchemaMappingsPaginator returns a new ListSchemaMappingsPaginator +func NewListSchemaMappingsPaginator(client ListSchemaMappingsAPIClient, params *ListSchemaMappingsInput, optFns ...func(*ListSchemaMappingsPaginatorOptions)) *ListSchemaMappingsPaginator { + if params == nil { + params = &ListSchemaMappingsInput{} + } + + options := ListSchemaMappingsPaginatorOptions{} + if params.MaxResults != nil { + options.Limit = *params.MaxResults + } + + for _, fn := range optFns { + fn(&options) + } + + return &ListSchemaMappingsPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.NextToken, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *ListSchemaMappingsPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) +} + +// NextPage retrieves the next ListSchemaMappings page. +func (p *ListSchemaMappingsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListSchemaMappingsOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.NextToken = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.MaxResults = limit + + result, err := p.client.ListSchemaMappings(ctx, ¶ms, optFns...) + if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.NextToken + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +func newServiceMetadataMiddleware_opListSchemaMappings(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "entityresolution", + OperationName: "ListSchemaMappings", + } +} diff --git a/service/entityresolution/api_op_ListTagsForResource.go b/service/entityresolution/api_op_ListTagsForResource.go new file mode 100644 index 00000000000..be0fde789e5 --- /dev/null +++ b/service/entityresolution/api_op_ListTagsForResource.go @@ -0,0 +1,126 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package entityresolution + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Displays the tags associated with an AWS Entity Resolution resource. 
In Entity +// Resolution, SchemaMapping , and MatchingWorkflow can be tagged. +func (c *Client) ListTagsForResource(ctx context.Context, params *ListTagsForResourceInput, optFns ...func(*Options)) (*ListTagsForResourceOutput, error) { + if params == nil { + params = &ListTagsForResourceInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListTagsForResource", params, optFns, c.addOperationListTagsForResourceMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListTagsForResourceOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListTagsForResourceInput struct { + + // The ARN of the resource for which you want to view tags. + // + // This member is required. + ResourceArn *string + + noSmithyDocumentSerde +} + +type ListTagsForResourceOutput struct { + + // The tags used to organize, track, or control access for this resource. + // + // This member is required. + Tags map[string]string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListTagsForResourceMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestjson1_serializeOpListTagsForResource{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpListTagsForResource{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpListTagsForResourceValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListTagsForResource(options.Region), middleware.Before); err != nil { + return err + } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opListTagsForResource(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "entityresolution", + OperationName: "ListTagsForResource", + } +} diff --git a/service/entityresolution/api_op_StartMatchingJob.go b/service/entityresolution/api_op_StartMatchingJob.go new file mode 100644 index 00000000000..060cc01103e --- /dev/null +++ 
b/service/entityresolution/api_op_StartMatchingJob.go @@ -0,0 +1,126 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package entityresolution + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Starts the MatchingJob of a workflow. The workflow must have previously been +// created using the CreateMatchingWorkflow endpoint. +func (c *Client) StartMatchingJob(ctx context.Context, params *StartMatchingJobInput, optFns ...func(*Options)) (*StartMatchingJobOutput, error) { + if params == nil { + params = &StartMatchingJobInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "StartMatchingJob", params, optFns, c.addOperationStartMatchingJobMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*StartMatchingJobOutput) + out.ResultMetadata = metadata + return out, nil +} + +type StartMatchingJobInput struct { + + // The name of the workflow whose matching job you want to start. + // + // This member is required. + WorkflowName *string + + noSmithyDocumentSerde +} + +type StartMatchingJobOutput struct { + + // The ID of the job. + // + // This member is required. + JobId *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationStartMatchingJobMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestjson1_serializeOpStartMatchingJob{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpStartMatchingJob{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpStartMatchingJobValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opStartMatchingJob(options.Region), middleware.Before); err != nil { + return err + } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opStartMatchingJob(region string) *awsmiddleware.RegisterServiceMetadata { + return
&awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "entityresolution", + OperationName: "StartMatchingJob", + } +} diff --git a/service/entityresolution/api_op_TagResource.go b/service/entityresolution/api_op_TagResource.go new file mode 100644 index 00000000000..481b7e11894 --- /dev/null +++ b/service/entityresolution/api_op_TagResource.go @@ -0,0 +1,134 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package entityresolution + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Assigns one or more tags (key-value pairs) to the specified AWS Entity +// Resolution resource. Tags can help you organize and categorize your resources. +// You can also use them to scope user permissions by granting a user permission to +// access or change only resources with certain tag values. In Entity Resolution, +// SchemaMapping and MatchingWorkflow can be tagged. Tags don't have any +// semantic meaning to AWS and are interpreted strictly as strings of characters. +// You can use the TagResource action with a resource that already has tags. If +// you specify a new tag key, this tag is appended to the list of tags associated +// with the resource. If you specify a tag key that is already associated with the +// resource, the new tag value that you specify replaces the previous value for +// that tag. +func (c *Client) TagResource(ctx context.Context, params *TagResourceInput, optFns ...func(*Options)) (*TagResourceOutput, error) { + if params == nil { + params = &TagResourceInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "TagResource", params, optFns, c.addOperationTagResourceMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*TagResourceOutput) + out.ResultMetadata = metadata + return out, nil +} + +type TagResourceInput struct { + + // The ARN of the resource to which you want to add tags. + // + // This member is required. + ResourceArn *string + + // The tags used to organize, track, or control access for this resource. + // + // This member is required. + Tags map[string]string + + noSmithyDocumentSerde +} + +type TagResourceOutput struct { + // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationTagResourceMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestjson1_serializeOpTagResource{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpTagResource{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpTagResourceValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opTagResource(options.Region), middleware.Before); err != nil { + return err + } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opTagResource(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "entityresolution", + OperationName: "TagResource", + } +} diff --git a/service/entityresolution/api_op_UntagResource.go b/service/entityresolution/api_op_UntagResource.go new file mode 100644 index 00000000000..6c31359d83e --- /dev/null +++ b/service/entityresolution/api_op_UntagResource.go @@ -0,0 +1,125 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package entityresolution + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Removes one or more tags from the specified AWS Entity Resolution resource. In +// Entity Resolution, SchemaMapping , and MatchingWorkflow can be tagged. 
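+// +// A minimal usage sketch (illustrative only; the ARN and tag key are +// placeholders): +// +// _, err := client.UntagResource(ctx, &entityresolution.UntagResourceInput{ +// ResourceArn: aws.String("arn:aws:entityresolution:..."), // placeholder ARN +// TagKeys: []string{"team"}, // hypothetical tag key +// }) +// if err != nil { +// log.Fatal(err) +// }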
+func (c *Client) UntagResource(ctx context.Context, params *UntagResourceInput, optFns ...func(*Options)) (*UntagResourceOutput, error) { + if params == nil { + params = &UntagResourceInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "UntagResource", params, optFns, c.addOperationUntagResourceMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*UntagResourceOutput) + out.ResultMetadata = metadata + return out, nil +} + +type UntagResourceInput struct { + + // The ARN of the resource from which you want to remove tags. + // + // This member is required. + ResourceArn *string + + // The list of tag keys to remove from the resource. + // + // This member is required. + TagKeys []string + + noSmithyDocumentSerde +} + +type UntagResourceOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationUntagResourceMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestjson1_serializeOpUntagResource{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpUntagResource{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpUntagResourceValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUntagResource(options.Region), middleware.Before); err != nil { + return err + } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opUntagResource(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "entityresolution", + OperationName: "UntagResource", + } +} diff --git a/service/entityresolution/api_op_UpdateMatchingWorkflow.go b/service/entityresolution/api_op_UpdateMatchingWorkflow.go new file mode 100644 index 00000000000..fefaec968d4 --- /dev/null +++ b/service/entityresolution/api_op_UpdateMatchingWorkflow.go @@ -0,0 +1,188 @@ +// Code generated by smithy-go-codegen DO NOT EDIT.
+ +package entityresolution + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/entityresolution/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Updates an existing MatchingWorkflow . This method is identical to +// CreateMatchingWorkflow , except it uses an HTTP PUT request instead of a POST +// request, and the MatchingWorkflow must already exist for the method to succeed. +func (c *Client) UpdateMatchingWorkflow(ctx context.Context, params *UpdateMatchingWorkflowInput, optFns ...func(*Options)) (*UpdateMatchingWorkflowOutput, error) { + if params == nil { + params = &UpdateMatchingWorkflowInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "UpdateMatchingWorkflow", params, optFns, c.addOperationUpdateMatchingWorkflowMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*UpdateMatchingWorkflowOutput) + out.ResultMetadata = metadata + return out, nil +} + +type UpdateMatchingWorkflowInput struct { + + // A list of InputSource objects, which have the fields InputSourceARN and + // SchemaName . + // + // This member is required. + InputSourceConfig []types.InputSource + + // A list of OutputSource objects, each of which contains fields OutputS3Path , + // ApplyNormalization , and Output . + // + // This member is required. + OutputSourceConfig []types.OutputSource + + // An object which defines the resolutionType and the ruleBasedProperties + // + // This member is required. + ResolutionTechniques *types.ResolutionTechniques + + // The Amazon Resource Name (ARN) of the IAM role. AWS Entity Resolution assumes + // this role to create resources on your behalf as part of workflow execution. + // + // This member is required. + RoleArn *string + + // The name of the workflow to be updated. + // + // This member is required. + WorkflowName *string + + // A description of the workflow. + Description *string + + // An object which defines an incremental run type and has only incrementalRunType + // as a field. + IncrementalRunConfig *types.IncrementalRunConfig + + noSmithyDocumentSerde +} + +type UpdateMatchingWorkflowOutput struct { + + // A list of InputSource objects, which have the fields InputSourceARN and + // SchemaName . + // + // This member is required. + InputSourceConfig []types.InputSource + + // A list of OutputSource objects, each of which contains fields OutputS3Path , + // ApplyNormalization , and Output . + // + // This member is required. + OutputSourceConfig []types.OutputSource + + // An object which defines the resolutionType and the ruleBasedProperties + // + // This member is required. + ResolutionTechniques *types.ResolutionTechniques + + // The Amazon Resource Name (ARN) of the IAM role. AWS Entity Resolution assumes + // this role to create resources on your behalf as part of workflow execution. + // + // This member is required. + RoleArn *string + + // The name of the workflow. + // + // This member is required. + WorkflowName *string + + // A description of the workflow. + Description *string + + // An object which defines an incremental run type and has only incrementalRunType + // as a field. + IncrementalRunConfig *types.IncrementalRunConfig + + // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationUpdateMatchingWorkflowMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestjson1_serializeOpUpdateMatchingWorkflow{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpUpdateMatchingWorkflow{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpUpdateMatchingWorkflowValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdateMatchingWorkflow(options.Region), middleware.Before); err != nil { + return err + } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opUpdateMatchingWorkflow(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "entityresolution", + OperationName: "UpdateMatchingWorkflow", + } +} diff --git a/service/entityresolution/deserializers.go b/service/entityresolution/deserializers.go new file mode 100644 index 00000000000..344fdde9623 --- /dev/null +++ b/service/entityresolution/deserializers.go @@ -0,0 +1,4442 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package entityresolution + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws/protocol/restjson" + "github.com/aws/aws-sdk-go-v2/service/entityresolution/types" + smithy "github.com/aws/smithy-go" + smithyio "github.com/aws/smithy-go/io" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithytime "github.com/aws/smithy-go/time" + smithyhttp "github.com/aws/smithy-go/transport/http" + "io" + "strings" +) + +type awsRestjson1_deserializeOpCreateMatchingWorkflow struct { +} + +func (*awsRestjson1_deserializeOpCreateMatchingWorkflow) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpCreateMatchingWorkflow) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorCreateMatchingWorkflow(response, &metadata) + } + output := &CreateMatchingWorkflowOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeOpDocumentCreateMatchingWorkflowOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorCreateMatchingWorkflow(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = 
message + } + + switch { + case strings.EqualFold("AccessDeniedException", errorCode): + return awsRestjson1_deserializeErrorAccessDeniedException(response, errorBody) + + case strings.EqualFold("ConflictException", errorCode): + return awsRestjson1_deserializeErrorConflictException(response, errorBody) + + case strings.EqualFold("ExceedsLimitException", errorCode): + return awsRestjson1_deserializeErrorExceedsLimitException(response, errorBody) + + case strings.EqualFold("InternalServerException", errorCode): + return awsRestjson1_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("ThrottlingException", errorCode): + return awsRestjson1_deserializeErrorThrottlingException(response, errorBody) + + case strings.EqualFold("ValidationException", errorCode): + return awsRestjson1_deserializeErrorValidationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeOpDocumentCreateMatchingWorkflowOutput(v **CreateMatchingWorkflowOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *CreateMatchingWorkflowOutput + if *v == nil { + sv = &CreateMatchingWorkflowOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Description to be of type string, got %T instead", value) + } + sv.Description = ptr.String(jtv) + } + + case "incrementalRunConfig": + if err := awsRestjson1_deserializeDocumentIncrementalRunConfig(&sv.IncrementalRunConfig, value); err != nil { + return err + } + + case "inputSourceConfig": + if err := awsRestjson1_deserializeDocumentInputSourceConfig(&sv.InputSourceConfig, value); err != nil { + return err + } + + case "outputSourceConfig": + if err := awsRestjson1_deserializeDocumentOutputSourceConfig(&sv.OutputSourceConfig, value); err != nil { + return err + } + + case "resolutionTechniques": + if err := awsRestjson1_deserializeDocumentResolutionTechniques(&sv.ResolutionTechniques, value); err != nil { + return err + } + + case "roleArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.RoleArn = ptr.String(jtv) + } + + case "workflowArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected MatchingWorkflowArn to be of type string, got %T instead", value) + } + sv.WorkflowArn = ptr.String(jtv) + } + + case "workflowName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected EntityName to be of type string, got %T instead", value) + } + sv.WorkflowName = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +type awsRestjson1_deserializeOpCreateSchemaMapping struct { +} + +func (*awsRestjson1_deserializeOpCreateSchemaMapping) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpCreateSchemaMapping) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = 
next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorCreateSchemaMapping(response, &metadata) + } + output := &CreateSchemaMappingOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeOpDocumentCreateSchemaMappingOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorCreateSchemaMapping(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("AccessDeniedException", errorCode): + return awsRestjson1_deserializeErrorAccessDeniedException(response, errorBody) + + case strings.EqualFold("ConflictException", errorCode): + return awsRestjson1_deserializeErrorConflictException(response, errorBody) + + case strings.EqualFold("ExceedsLimitException", errorCode): + return awsRestjson1_deserializeErrorExceedsLimitException(response, errorBody) + + case strings.EqualFold("InternalServerException", errorCode): + return awsRestjson1_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("ThrottlingException", errorCode): + return awsRestjson1_deserializeErrorThrottlingException(response, errorBody) + + case strings.EqualFold("ValidationException", errorCode): + return awsRestjson1_deserializeErrorValidationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: 
errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeOpDocumentCreateSchemaMappingOutput(v **CreateSchemaMappingOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *CreateSchemaMappingOutput + if *v == nil { + sv = &CreateSchemaMappingOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Description to be of type string, got %T instead", value) + } + sv.Description = ptr.String(jtv) + } + + case "mappedInputFields": + if err := awsRestjson1_deserializeDocumentSchemaInputAttributes(&sv.MappedInputFields, value); err != nil { + return err + } + + case "schemaArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected SchemaMappingArn to be of type string, got %T instead", value) + } + sv.SchemaArn = ptr.String(jtv) + } + + case "schemaName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected EntityName to be of type string, got %T instead", value) + } + sv.SchemaName = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +type awsRestjson1_deserializeOpDeleteMatchingWorkflow struct { +} + +func (*awsRestjson1_deserializeOpDeleteMatchingWorkflow) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpDeleteMatchingWorkflow) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorDeleteMatchingWorkflow(response, &metadata) + } + output := &DeleteMatchingWorkflowOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeOpDocumentDeleteMatchingWorkflowOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorDeleteMatchingWorkflow(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return 
&smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("AccessDeniedException", errorCode): + return awsRestjson1_deserializeErrorAccessDeniedException(response, errorBody) + + case strings.EqualFold("InternalServerException", errorCode): + return awsRestjson1_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("ThrottlingException", errorCode): + return awsRestjson1_deserializeErrorThrottlingException(response, errorBody) + + case strings.EqualFold("ValidationException", errorCode): + return awsRestjson1_deserializeErrorValidationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeOpDocumentDeleteMatchingWorkflowOutput(v **DeleteMatchingWorkflowOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *DeleteMatchingWorkflowOutput + if *v == nil { + sv = &DeleteMatchingWorkflowOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +type awsRestjson1_deserializeOpDeleteSchemaMapping struct { +} + +func (*awsRestjson1_deserializeOpDeleteSchemaMapping) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpDeleteSchemaMapping) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorDeleteSchemaMapping(response, &metadata) + } + output := &DeleteSchemaMappingOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + 
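+ // The response body is teed through the fixed 1024-byte ring buffer above
+ // so that, if JSON decoding fails below, the most recently read bytes can
+ // be attached to the DeserializationError as a diagnostic snapshot.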
+ body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeOpDocumentDeleteSchemaMappingOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorDeleteSchemaMapping(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("AccessDeniedException", errorCode): + return awsRestjson1_deserializeErrorAccessDeniedException(response, errorBody) + + case strings.EqualFold("ConflictException", errorCode): + return awsRestjson1_deserializeErrorConflictException(response, errorBody) + + case strings.EqualFold("InternalServerException", errorCode): + return awsRestjson1_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("ThrottlingException", errorCode): + return awsRestjson1_deserializeErrorThrottlingException(response, errorBody) + + case strings.EqualFold("ValidationException", errorCode): + return awsRestjson1_deserializeErrorValidationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeOpDocumentDeleteSchemaMappingOutput(v **DeleteSchemaMappingOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *DeleteSchemaMappingOutput + if *v == nil { + sv = &DeleteSchemaMappingOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type 
string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +type awsRestjson1_deserializeOpGetMatchId struct { +} + +func (*awsRestjson1_deserializeOpGetMatchId) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpGetMatchId) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorGetMatchId(response, &metadata) + } + output := &GetMatchIdOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeOpDocumentGetMatchIdOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorGetMatchId(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("AccessDeniedException", errorCode): + return awsRestjson1_deserializeErrorAccessDeniedException(response, errorBody) + + case strings.EqualFold("InternalServerException", errorCode): + return awsRestjson1_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return 
awsRestjson1_deserializeErrorResourceNotFoundException(response, errorBody) + + case strings.EqualFold("ThrottlingException", errorCode): + return awsRestjson1_deserializeErrorThrottlingException(response, errorBody) + + case strings.EqualFold("ValidationException", errorCode): + return awsRestjson1_deserializeErrorValidationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeOpDocumentGetMatchIdOutput(v **GetMatchIdOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *GetMatchIdOutput + if *v == nil { + sv = &GetMatchIdOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "matchId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.MatchId = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +type awsRestjson1_deserializeOpGetMatchingJob struct { +} + +func (*awsRestjson1_deserializeOpGetMatchingJob) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpGetMatchingJob) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorGetMatchingJob(response, &metadata) + } + output := &GetMatchingJobOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeOpDocumentGetMatchingJobOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorGetMatchingJob(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { 
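+ // The sanitized X-Amzn-ErrorType header takes precedence over any error
+ // code found in the JSON body; restjson.SanitizeErrorCode strips the
+ // namespace prefix and any URI metadata from the raw header value.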
+ errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("AccessDeniedException", errorCode): + return awsRestjson1_deserializeErrorAccessDeniedException(response, errorBody) + + case strings.EqualFold("InternalServerException", errorCode): + return awsRestjson1_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsRestjson1_deserializeErrorResourceNotFoundException(response, errorBody) + + case strings.EqualFold("ThrottlingException", errorCode): + return awsRestjson1_deserializeErrorThrottlingException(response, errorBody) + + case strings.EqualFold("ValidationException", errorCode): + return awsRestjson1_deserializeErrorValidationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeOpDocumentGetMatchingJobOutput(v **GetMatchingJobOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *GetMatchingJobOutput + if *v == nil { + sv = &GetMatchingJobOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "endTime": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.EndTime = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Timestamp to be a JSON Number, got %T instead", value) + + } + } + + case "errorDetails": + if err := awsRestjson1_deserializeDocumentErrorDetails(&sv.ErrorDetails, value); err != nil { + return err + } + + case "jobId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected JobId to be of type string, got %T instead", value) + } + sv.JobId = ptr.String(jtv) + } + + case "metrics": + if err := awsRestjson1_deserializeDocumentJobMetrics(&sv.Metrics, value); err != nil { + return err + } + + case "startTime": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.StartTime = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Timestamp to be a JSON Number, got %T instead", value) + + } + } + + case "status": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected JobStatus to be of type string, got %T instead", value) + } + sv.Status = types.JobStatus(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +type awsRestjson1_deserializeOpGetMatchingWorkflow struct { +} + +func 
(*awsRestjson1_deserializeOpGetMatchingWorkflow) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpGetMatchingWorkflow) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorGetMatchingWorkflow(response, &metadata) + } + output := &GetMatchingWorkflowOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeOpDocumentGetMatchingWorkflowOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorGetMatchingWorkflow(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("AccessDeniedException", errorCode): + return awsRestjson1_deserializeErrorAccessDeniedException(response, errorBody) + + case strings.EqualFold("InternalServerException", errorCode): + return awsRestjson1_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsRestjson1_deserializeErrorResourceNotFoundException(response, errorBody) + + case strings.EqualFold("ThrottlingException", errorCode): + return 
awsRestjson1_deserializeErrorThrottlingException(response, errorBody) + + case strings.EqualFold("ValidationException", errorCode): + return awsRestjson1_deserializeErrorValidationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeOpDocumentGetMatchingWorkflowOutput(v **GetMatchingWorkflowOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *GetMatchingWorkflowOutput + if *v == nil { + sv = &GetMatchingWorkflowOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "createdAt": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.CreatedAt = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Timestamp to be a JSON Number, got %T instead", value) + + } + } + + case "description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Description to be of type string, got %T instead", value) + } + sv.Description = ptr.String(jtv) + } + + case "incrementalRunConfig": + if err := awsRestjson1_deserializeDocumentIncrementalRunConfig(&sv.IncrementalRunConfig, value); err != nil { + return err + } + + case "inputSourceConfig": + if err := awsRestjson1_deserializeDocumentInputSourceConfig(&sv.InputSourceConfig, value); err != nil { + return err + } + + case "outputSourceConfig": + if err := awsRestjson1_deserializeDocumentOutputSourceConfig(&sv.OutputSourceConfig, value); err != nil { + return err + } + + case "resolutionTechniques": + if err := awsRestjson1_deserializeDocumentResolutionTechniques(&sv.ResolutionTechniques, value); err != nil { + return err + } + + case "roleArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.RoleArn = ptr.String(jtv) + } + + case "tags": + if err := awsRestjson1_deserializeDocumentTagMap(&sv.Tags, value); err != nil { + return err + } + + case "updatedAt": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.UpdatedAt = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Timestamp to be a JSON Number, got %T instead", value) + + } + } + + case "workflowArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected MatchingWorkflowArn to be of type string, got %T instead", value) + } + sv.WorkflowArn = ptr.String(jtv) + } + + case "workflowName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected EntityName to be of type string, got %T instead", value) + } + sv.WorkflowName = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +type awsRestjson1_deserializeOpGetSchemaMapping struct { +} + +func (*awsRestjson1_deserializeOpGetSchemaMapping) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpGetSchemaMapping) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, 
metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorGetSchemaMapping(response, &metadata) + } + output := &GetSchemaMappingOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeOpDocumentGetSchemaMappingOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorGetSchemaMapping(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("AccessDeniedException", errorCode): + return awsRestjson1_deserializeErrorAccessDeniedException(response, errorBody) + + case strings.EqualFold("InternalServerException", errorCode): + return awsRestjson1_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsRestjson1_deserializeErrorResourceNotFoundException(response, errorBody) + + case strings.EqualFold("ThrottlingException", errorCode): + return awsRestjson1_deserializeErrorThrottlingException(response, errorBody) + + case strings.EqualFold("ValidationException", errorCode): + return awsRestjson1_deserializeErrorValidationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + 
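The awsRestjson1_deserializeOpDocument*Output functions, including the GetSchemaMappingOutput one below, all share a single key-switch pattern: assert the decoded body to map[string]interface{}, coerce each known JSON key into the matching output field, and silently skip unknown keys. Here is a self-contained sketch of that pattern, using a hypothetical Profile shape rather than any generated type:

package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

// Profile is a hypothetical shape used only to illustrate the key-switch
// pattern; it does not appear in the generated code.
type Profile struct {
	Name *string
	Age  *int64
}

func deserializeProfile(v **Profile, value interface{}) error {
	if value == nil {
		return nil
	}
	shape, ok := value.(map[string]interface{})
	if !ok {
		return fmt.Errorf("unexpected JSON type %v", value)
	}
	sv := *v
	if sv == nil {
		sv = &Profile{}
	}
	for key, val := range shape {
		switch key {
		case "name":
			jtv, ok := val.(string)
			if !ok {
				return fmt.Errorf("expected string, got %T", val)
			}
			sv.Name = &jtv
		case "age":
			num, ok := val.(json.Number)
			if !ok {
				return fmt.Errorf("expected number, got %T", val)
			}
			n, err := num.Int64()
			if err != nil {
				return err
			}
			sv.Age = &n
		default:
			// Unknown keys are ignored, keeping older clients compatible
			// with newer response fields.
		}
	}
	*v = sv
	return nil
}

func main() {
	dec := json.NewDecoder(strings.NewReader(`{"name":"ada","age":36}`))
	dec.UseNumber() // numbers stay json.Number, as in the generated code
	var shape interface{}
	if err := dec.Decode(&shape); err != nil {
		panic(err)
	}
	var p *Profile
	if err := deserializeProfile(&p, shape); err != nil {
		panic(err)
	}
	fmt.Println(*p.Name, *p.Age) // ada 36
}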
+func awsRestjson1_deserializeOpDocumentGetSchemaMappingOutput(v **GetSchemaMappingOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *GetSchemaMappingOutput + if *v == nil { + sv = &GetSchemaMappingOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "createdAt": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.CreatedAt = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Timestamp to be a JSON Number, got %T instead", value) + + } + } + + case "description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Description to be of type string, got %T instead", value) + } + sv.Description = ptr.String(jtv) + } + + case "mappedInputFields": + if err := awsRestjson1_deserializeDocumentSchemaInputAttributes(&sv.MappedInputFields, value); err != nil { + return err + } + + case "schemaArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected SchemaMappingArn to be of type string, got %T instead", value) + } + sv.SchemaArn = ptr.String(jtv) + } + + case "schemaName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected EntityName to be of type string, got %T instead", value) + } + sv.SchemaName = ptr.String(jtv) + } + + case "tags": + if err := awsRestjson1_deserializeDocumentTagMap(&sv.Tags, value); err != nil { + return err + } + + case "updatedAt": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.UpdatedAt = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Timestamp to be a JSON Number, got %T instead", value) + + } + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +type awsRestjson1_deserializeOpListMatchingJobs struct { +} + +func (*awsRestjson1_deserializeOpListMatchingJobs) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpListMatchingJobs) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorListMatchingJobs(response, &metadata) + } + output := &ListMatchingJobsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, 
metadata, err + } + + err = awsRestjson1_deserializeOpDocumentListMatchingJobsOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorListMatchingJobs(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("AccessDeniedException", errorCode): + return awsRestjson1_deserializeErrorAccessDeniedException(response, errorBody) + + case strings.EqualFold("InternalServerException", errorCode): + return awsRestjson1_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsRestjson1_deserializeErrorResourceNotFoundException(response, errorBody) + + case strings.EqualFold("ThrottlingException", errorCode): + return awsRestjson1_deserializeErrorThrottlingException(response, errorBody) + + case strings.EqualFold("ValidationException", errorCode): + return awsRestjson1_deserializeErrorValidationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeOpDocumentListMatchingJobsOutput(v **ListMatchingJobsOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ListMatchingJobsOutput + if *v == nil { + sv = &ListMatchingJobsOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "jobs": + if err := awsRestjson1_deserializeDocumentJobList(&sv.Jobs, value); err != nil { + return err + } + + case "nextToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected NextToken to be of type string, got %T instead", value) + } + sv.NextToken = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +type awsRestjson1_deserializeOpListMatchingWorkflows struct { +} + +func (*awsRestjson1_deserializeOpListMatchingWorkflows) ID() string { + return 
"OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpListMatchingWorkflows) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorListMatchingWorkflows(response, &metadata) + } + output := &ListMatchingWorkflowsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeOpDocumentListMatchingWorkflowsOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorListMatchingWorkflows(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("AccessDeniedException", errorCode): + return awsRestjson1_deserializeErrorAccessDeniedException(response, errorBody) + + case strings.EqualFold("InternalServerException", errorCode): + return awsRestjson1_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("ThrottlingException", errorCode): + return awsRestjson1_deserializeErrorThrottlingException(response, errorBody) + + case strings.EqualFold("ValidationException", errorCode): + return awsRestjson1_deserializeErrorValidationException(response, errorBody) + + default: + genericError := 
&smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeOpDocumentListMatchingWorkflowsOutput(v **ListMatchingWorkflowsOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ListMatchingWorkflowsOutput + if *v == nil { + sv = &ListMatchingWorkflowsOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "nextToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected NextToken to be of type string, got %T instead", value) + } + sv.NextToken = ptr.String(jtv) + } + + case "workflowSummaries": + if err := awsRestjson1_deserializeDocumentMatchingWorkflowList(&sv.WorkflowSummaries, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +type awsRestjson1_deserializeOpListSchemaMappings struct { +} + +func (*awsRestjson1_deserializeOpListSchemaMappings) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpListSchemaMappings) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorListSchemaMappings(response, &metadata) + } + output := &ListSchemaMappingsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeOpDocumentListSchemaMappingsOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorListSchemaMappings(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := 
io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("AccessDeniedException", errorCode): + return awsRestjson1_deserializeErrorAccessDeniedException(response, errorBody) + + case strings.EqualFold("InternalServerException", errorCode): + return awsRestjson1_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("ThrottlingException", errorCode): + return awsRestjson1_deserializeErrorThrottlingException(response, errorBody) + + case strings.EqualFold("ValidationException", errorCode): + return awsRestjson1_deserializeErrorValidationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeOpDocumentListSchemaMappingsOutput(v **ListSchemaMappingsOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ListSchemaMappingsOutput + if *v == nil { + sv = &ListSchemaMappingsOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "nextToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected NextToken to be of type string, got %T instead", value) + } + sv.NextToken = ptr.String(jtv) + } + + case "schemaList": + if err := awsRestjson1_deserializeDocumentSchemaMappingList(&sv.SchemaList, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +type awsRestjson1_deserializeOpListTagsForResource struct { +} + +func (*awsRestjson1_deserializeOpListTagsForResource) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpListTagsForResource) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorListTagsForResource(response, &metadata) + } + output := &ListTagsForResourceOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: 
fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeOpDocumentListTagsForResourceOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorListTagsForResource(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InternalServerException", errorCode): + return awsRestjson1_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsRestjson1_deserializeErrorResourceNotFoundException(response, errorBody) + + case strings.EqualFold("ValidationException", errorCode): + return awsRestjson1_deserializeErrorValidationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeOpDocumentListTagsForResourceOutput(v **ListTagsForResourceOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ListTagsForResourceOutput + if *v == nil { + sv = &ListTagsForResourceOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "tags": + if err := awsRestjson1_deserializeDocumentTagMap(&sv.Tags, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +type awsRestjson1_deserializeOpStartMatchingJob struct { +} + +func (*awsRestjson1_deserializeOpStartMatchingJob) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpStartMatchingJob) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + 
response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorStartMatchingJob(response, &metadata) + } + output := &StartMatchingJobOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeOpDocumentStartMatchingJobOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorStartMatchingJob(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("AccessDeniedException", errorCode): + return awsRestjson1_deserializeErrorAccessDeniedException(response, errorBody) + + case strings.EqualFold("ConflictException", errorCode): + return awsRestjson1_deserializeErrorConflictException(response, errorBody) + + case strings.EqualFold("ExceedsLimitException", errorCode): + return awsRestjson1_deserializeErrorExceedsLimitException(response, errorBody) + + case strings.EqualFold("InternalServerException", errorCode): + return awsRestjson1_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsRestjson1_deserializeErrorResourceNotFoundException(response, errorBody) + + case strings.EqualFold("ThrottlingException", errorCode): + return awsRestjson1_deserializeErrorThrottlingException(response, errorBody) + + case strings.EqualFold("ValidationException", errorCode): + return awsRestjson1_deserializeErrorValidationException(response, errorBody) + + default: 
+ genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeOpDocumentStartMatchingJobOutput(v **StartMatchingJobOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *StartMatchingJobOutput + if *v == nil { + sv = &StartMatchingJobOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "jobId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected JobId to be of type string, got %T instead", value) + } + sv.JobId = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +type awsRestjson1_deserializeOpTagResource struct { +} + +func (*awsRestjson1_deserializeOpTagResource) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpTagResource) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorTagResource(response, &metadata) + } + output := &TagResourceOutput{} + out.Result = output + + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorTagResource(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InternalServerException", errorCode): + return awsRestjson1_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsRestjson1_deserializeErrorResourceNotFoundException(response, errorBody) + + case strings.EqualFold("ValidationException", errorCode): + return awsRestjson1_deserializeErrorValidationException(response, errorBody) + + default: + genericError 
:= &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestjson1_deserializeOpUntagResource struct { +} + +func (*awsRestjson1_deserializeOpUntagResource) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpUntagResource) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorUntagResource(response, &metadata) + } + output := &UntagResourceOutput{} + out.Result = output + + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorUntagResource(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InternalServerException", errorCode): + return awsRestjson1_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsRestjson1_deserializeErrorResourceNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestjson1_deserializeOpUpdateMatchingWorkflow struct { +} + +func (*awsRestjson1_deserializeOpUpdateMatchingWorkflow) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpUpdateMatchingWorkflow) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, 
metadata, awsRestjson1_deserializeOpErrorUpdateMatchingWorkflow(response, &metadata) + } + output := &UpdateMatchingWorkflowOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeOpDocumentUpdateMatchingWorkflowOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorUpdateMatchingWorkflow(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("AccessDeniedException", errorCode): + return awsRestjson1_deserializeErrorAccessDeniedException(response, errorBody) + + case strings.EqualFold("InternalServerException", errorCode): + return awsRestjson1_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsRestjson1_deserializeErrorResourceNotFoundException(response, errorBody) + + case strings.EqualFold("ThrottlingException", errorCode): + return awsRestjson1_deserializeErrorThrottlingException(response, errorBody) + + case strings.EqualFold("ValidationException", errorCode): + return awsRestjson1_deserializeErrorValidationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeOpDocumentUpdateMatchingWorkflowOutput(v **UpdateMatchingWorkflowOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *UpdateMatchingWorkflowOutput + 
if *v == nil { + sv = &UpdateMatchingWorkflowOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Description to be of type string, got %T instead", value) + } + sv.Description = ptr.String(jtv) + } + + case "incrementalRunConfig": + if err := awsRestjson1_deserializeDocumentIncrementalRunConfig(&sv.IncrementalRunConfig, value); err != nil { + return err + } + + case "inputSourceConfig": + if err := awsRestjson1_deserializeDocumentInputSourceConfig(&sv.InputSourceConfig, value); err != nil { + return err + } + + case "outputSourceConfig": + if err := awsRestjson1_deserializeDocumentOutputSourceConfig(&sv.OutputSourceConfig, value); err != nil { + return err + } + + case "resolutionTechniques": + if err := awsRestjson1_deserializeDocumentResolutionTechniques(&sv.ResolutionTechniques, value); err != nil { + return err + } + + case "roleArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.RoleArn = ptr.String(jtv) + } + + case "workflowName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected EntityName to be of type string, got %T instead", value) + } + sv.WorkflowName = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeErrorAccessDeniedException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.AccessDeniedException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentAccessDeniedException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorConflictException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.ConflictException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentConflictException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorExceedsLimitException(response 
*smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.ExceedsLimitException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentExceedsLimitException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorInternalServerException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.InternalServerException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentInternalServerException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorResourceNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.ResourceNotFoundException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentResourceNotFoundException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorThrottlingException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.ThrottlingException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode 
response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentThrottlingException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorValidationException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.ValidationException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentValidationException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeDocumentAccessDeniedException(v **types.AccessDeniedException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.AccessDeniedException + if *v == nil { + sv = &types.AccessDeniedException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentConflictException(v **types.ConflictException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ConflictException + if *v == nil { + sv = &types.ConflictException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentErrorDetails(v **types.ErrorDetails, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ErrorDetails + if *v == nil { + sv = &types.ErrorDetails{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "errorMessage": + if value != nil { + jtv, 
ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value) + } + sv.ErrorMessage = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentExceedsLimitException(v **types.ExceedsLimitException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ExceedsLimitException + if *v == nil { + sv = &types.ExceedsLimitException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentIncrementalRunConfig(v **types.IncrementalRunConfig, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.IncrementalRunConfig + if *v == nil { + sv = &types.IncrementalRunConfig{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "incrementalRunType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected IncrementalRunType to be of type string, got %T instead", value) + } + sv.IncrementalRunType = types.IncrementalRunType(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentInputSource(v **types.InputSource, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InputSource + if *v == nil { + sv = &types.InputSource{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "applyNormalization": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected Boolean to be of type *bool, got %T instead", value) + } + sv.ApplyNormalization = ptr.Bool(jtv) + } + + case "inputSourceARN": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.InputSourceARN = ptr.String(jtv) + } + + case "schemaName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected EntityName to be of type string, got %T instead", value) + } + sv.SchemaName = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentInputSourceConfig(v *[]types.InputSource, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.InputSource + if *v == nil { + cv = []types.InputSource{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.InputSource + destAddr := 
&col + if err := awsRestjson1_deserializeDocumentInputSource(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsRestjson1_deserializeDocumentInternalServerException(v **types.InternalServerException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InternalServerException + if *v == nil { + sv = &types.InternalServerException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentJobList(v *[]types.JobSummary, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.JobSummary + if *v == nil { + cv = []types.JobSummary{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.JobSummary + destAddr := &col + if err := awsRestjson1_deserializeDocumentJobSummary(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsRestjson1_deserializeDocumentJobMetrics(v **types.JobMetrics, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.JobMetrics + if *v == nil { + sv = &types.JobMetrics{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "inputRecords": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Integer to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.InputRecords = ptr.Int32(int32(i64)) + } + + case "matchIDs": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Integer to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.MatchIDs = ptr.Int32(int32(i64)) + } + + case "recordsNotProcessed": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Integer to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.RecordsNotProcessed = ptr.Int32(int32(i64)) + } + + case "totalRecordsProcessed": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Integer to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.TotalRecordsProcessed = ptr.Int32(int32(i64)) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentJobSummary(v **types.JobSummary, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } 
+ + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.JobSummary + if *v == nil { + sv = &types.JobSummary{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "endTime": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.EndTime = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Timestamp to be a JSON Number, got %T instead", value) + + } + } + + case "jobId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected JobId to be of type string, got %T instead", value) + } + sv.JobId = ptr.String(jtv) + } + + case "startTime": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.StartTime = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Timestamp to be a JSON Number, got %T instead", value) + + } + } + + case "status": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected JobStatus to be of type string, got %T instead", value) + } + sv.Status = types.JobStatus(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentMatchingKeys(v *[]string, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []string + if *v == nil { + cv = []string{} + } else { + cv = *v + } + + for _, value := range shape { + var col string + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected AttributeName to be of type string, got %T instead", value) + } + col = jtv + } + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsRestjson1_deserializeDocumentMatchingWorkflowList(v *[]types.MatchingWorkflowSummary, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.MatchingWorkflowSummary + if *v == nil { + cv = []types.MatchingWorkflowSummary{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.MatchingWorkflowSummary + destAddr := &col + if err := awsRestjson1_deserializeDocumentMatchingWorkflowSummary(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsRestjson1_deserializeDocumentMatchingWorkflowSummary(v **types.MatchingWorkflowSummary, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.MatchingWorkflowSummary + if *v == nil { + sv = &types.MatchingWorkflowSummary{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "createdAt": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.CreatedAt = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + 
return fmt.Errorf("expected Timestamp to be a JSON Number, got %T instead", value) + + } + } + + case "updatedAt": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.UpdatedAt = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Timestamp to be a JSON Number, got %T instead", value) + + } + } + + case "workflowArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected MatchingWorkflowArn to be of type string, got %T instead", value) + } + sv.WorkflowArn = ptr.String(jtv) + } + + case "workflowName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected EntityName to be of type string, got %T instead", value) + } + sv.WorkflowName = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentOutputAttribute(v **types.OutputAttribute, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.OutputAttribute + if *v == nil { + sv = &types.OutputAttribute{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "hashed": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected Boolean to be of type *bool, got %T instead", value) + } + sv.Hashed = ptr.Bool(jtv) + } + + case "name": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected AttributeName to be of type string, got %T instead", value) + } + sv.Name = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentOutputAttributes(v *[]types.OutputAttribute, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.OutputAttribute + if *v == nil { + cv = []types.OutputAttribute{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.OutputAttribute + destAddr := &col + if err := awsRestjson1_deserializeDocumentOutputAttribute(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsRestjson1_deserializeDocumentOutputSource(v **types.OutputSource, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.OutputSource + if *v == nil { + sv = &types.OutputSource{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "applyNormalization": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected Boolean to be of type *bool, got %T instead", value) + } + sv.ApplyNormalization = ptr.Bool(jtv) + } + + case "KMSArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected KMSArn to be of type string, got %T instead", value) + } + sv.KMSArn = ptr.String(jtv) + } + + case "output": + if err := 
awsRestjson1_deserializeDocumentOutputAttributes(&sv.Output, value); err != nil { + return err + } + + case "outputS3Path": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.OutputS3Path = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentOutputSourceConfig(v *[]types.OutputSource, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.OutputSource + if *v == nil { + cv = []types.OutputSource{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.OutputSource + destAddr := &col + if err := awsRestjson1_deserializeDocumentOutputSource(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsRestjson1_deserializeDocumentResolutionTechniques(v **types.ResolutionTechniques, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ResolutionTechniques + if *v == nil { + sv = &types.ResolutionTechniques{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "resolutionType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ResolutionType to be of type string, got %T instead", value) + } + sv.ResolutionType = types.ResolutionType(jtv) + } + + case "ruleBasedProperties": + if err := awsRestjson1_deserializeDocumentRuleBasedProperties(&sv.RuleBasedProperties, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentResourceNotFoundException(v **types.ResourceNotFoundException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ResourceNotFoundException + if *v == nil { + sv = &types.ResourceNotFoundException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentRule(v **types.Rule, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.Rule + if *v == nil { + sv = &types.Rule{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "matchingKeys": + if err := awsRestjson1_deserializeDocumentMatchingKeys(&sv.MatchingKeys, value); err != nil { + return err + } + + case "ruleName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected 
String to be of type string, got %T instead", value) + } + sv.RuleName = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentRuleBasedProperties(v **types.RuleBasedProperties, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.RuleBasedProperties + if *v == nil { + sv = &types.RuleBasedProperties{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "attributeMatchingModel": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected AttributeMatchingModel to be of type string, got %T instead", value) + } + sv.AttributeMatchingModel = types.AttributeMatchingModel(jtv) + } + + case "rules": + if err := awsRestjson1_deserializeDocumentRuleList(&sv.Rules, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentRuleList(v *[]types.Rule, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.Rule + if *v == nil { + cv = []types.Rule{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.Rule + destAddr := &col + if err := awsRestjson1_deserializeDocumentRule(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsRestjson1_deserializeDocumentSchemaInputAttribute(v **types.SchemaInputAttribute, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.SchemaInputAttribute + if *v == nil { + sv = &types.SchemaInputAttribute{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "fieldName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected AttributeName to be of type string, got %T instead", value) + } + sv.FieldName = ptr.String(jtv) + } + + case "groupName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected AttributeName to be of type string, got %T instead", value) + } + sv.GroupName = ptr.String(jtv) + } + + case "matchKey": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected AttributeName to be of type string, got %T instead", value) + } + sv.MatchKey = ptr.String(jtv) + } + + case "type": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected SchemaAttributeType to be of type string, got %T instead", value) + } + sv.Type = types.SchemaAttributeType(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentSchemaInputAttributes(v *[]types.SchemaInputAttribute, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var 
cv []types.SchemaInputAttribute + if *v == nil { + cv = []types.SchemaInputAttribute{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.SchemaInputAttribute + destAddr := &col + if err := awsRestjson1_deserializeDocumentSchemaInputAttribute(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsRestjson1_deserializeDocumentSchemaMappingList(v *[]types.SchemaMappingSummary, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.SchemaMappingSummary + if *v == nil { + cv = []types.SchemaMappingSummary{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.SchemaMappingSummary + destAddr := &col + if err := awsRestjson1_deserializeDocumentSchemaMappingSummary(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsRestjson1_deserializeDocumentSchemaMappingSummary(v **types.SchemaMappingSummary, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.SchemaMappingSummary + if *v == nil { + sv = &types.SchemaMappingSummary{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "createdAt": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.CreatedAt = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Timestamp to be a JSON Number, got %T instead", value) + + } + } + + case "schemaArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected SchemaMappingArn to be of type string, got %T instead", value) + } + sv.SchemaArn = ptr.String(jtv) + } + + case "schemaName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected EntityName to be of type string, got %T instead", value) + } + sv.SchemaName = ptr.String(jtv) + } + + case "updatedAt": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.UpdatedAt = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Timestamp to be a JSON Number, got %T instead", value) + + } + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentTagMap(v *map[string]string, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var mv map[string]string + if *v == nil { + mv = map[string]string{} + } else { + mv = *v + } + + for key, value := range shape { + var parsedVal string + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected TagValue to be of type string, got %T instead", value) + } + parsedVal = jtv + } + mv[key] = parsedVal + + } + *v = mv + return nil +} + +func awsRestjson1_deserializeDocumentThrottlingException(v 
**types.ThrottlingException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ThrottlingException + if *v == nil { + sv = &types.ThrottlingException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentValidationException(v **types.ValidationException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ValidationException + if *v == nil { + sv = &types.ValidationException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} diff --git a/service/entityresolution/doc.go b/service/entityresolution/doc.go new file mode 100644 index 00000000000..7c3f9ad6187 --- /dev/null +++ b/service/entityresolution/doc.go @@ -0,0 +1,20 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +// Package entityresolution provides the API client, operations, and parameter +// types for AWS EntityResolution. +// +// Welcome to the AWS Entity Resolution API Reference. AWS Entity Resolution is an +// AWS service that provides pre-configured entity resolution capabilities that +// enable developers and analysts at advertising and marketing companies to build +// an accurate and complete view of their consumers. With AWS Entity Resolution, +// you have the ability to match source records containing consumer identifiers, +// such as name, email address, and phone number. This holds true even when these +// records have incomplete or conflicting identifiers. For example, AWS Entity +// Resolution can effectively match a source record from a customer relationship +// management (CRM) system, which includes account information like first name, +// last name, postal address, phone number, and email address, with a source record +// from a marketing system containing campaign information, such as username and +// email address. To learn more about AWS Entity Resolution concepts, procedures, +// and best practices, see the AWS Entity Resolution User Guide (https://docs.aws.amazon.com/entityresolution/latest/userguide/what-is-service.html) +// . +package entityresolution diff --git a/service/entityresolution/endpoints.go b/service/entityresolution/endpoints.go new file mode 100644 index 00000000000..9ccd27af6b1 --- /dev/null +++ b/service/entityresolution/endpoints.go @@ -0,0 +1,200 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
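// Illustrative only: a minimal sketch of calling the client that doc.go above
// documents. None of this appears in the generated module; it assumes the
// standard generated surface (NewFromConfig, ListMatchingWorkflows) plus the
// shared-config loader from github.com/aws/aws-sdk-go-v2/config.
//
//	cfg, err := config.LoadDefaultConfig(context.TODO())
//	if err != nil {
//		log.Fatal(err)
//	}
//	client := entityresolution.NewFromConfig(cfg)
//	// WorkflowSummaries and NextToken mirror the fields handled by the
//	// ListMatchingWorkflows deserializer earlier in this diff.
//	out, err := client.ListMatchingWorkflows(context.TODO(), &entityresolution.ListMatchingWorkflowsInput{})
//	if err != nil {
//		log.Fatal(err)
//	}
//	for _, ws := range out.WorkflowSummaries {
//		fmt.Println(*ws.WorkflowName)
//	}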
+ +package entityresolution + +import ( + "context" + "errors" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + internalendpoints "github.com/aws/aws-sdk-go-v2/service/entityresolution/internal/endpoints" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "net/url" + "strings" +) + +// EndpointResolverOptions is the service endpoint resolver options +type EndpointResolverOptions = internalendpoints.Options + +// EndpointResolver interface for resolving service endpoints. +type EndpointResolver interface { + ResolveEndpoint(region string, options EndpointResolverOptions) (aws.Endpoint, error) +} + +var _ EndpointResolver = &internalendpoints.Resolver{} + +// NewDefaultEndpointResolver constructs a new service endpoint resolver +func NewDefaultEndpointResolver() *internalendpoints.Resolver { + return internalendpoints.New() +} + +// EndpointResolverFunc is a helper utility that wraps a function so it satisfies +// the EndpointResolver interface. This is useful when you want to add additional +// endpoint resolving logic, or stub out specific endpoints with custom values. +type EndpointResolverFunc func(region string, options EndpointResolverOptions) (aws.Endpoint, error) + +func (fn EndpointResolverFunc) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) { + return fn(region, options) +} + +func resolveDefaultEndpointConfiguration(o *Options) { + if o.EndpointResolver != nil { + return + } + o.EndpointResolver = NewDefaultEndpointResolver() +} + +// EndpointResolverFromURL returns an EndpointResolver configured using the +// provided endpoint URL. By default, the resolved endpoint resolver uses the +// client region as signing region, and the endpoint source is set to +// EndpointSourceCustom. You can provide functional options to configure endpoint +// values for the resolved endpoint.
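// A hypothetical usage sketch for the EndpointResolverFromURL helper described
// above, e.g. to target a local test stub; cfg is assumed to be an aws.Config
// (for instance from config.LoadDefaultConfig), and Options.EndpointResolver is
// the same field that resolveDefaultEndpointConfiguration sets earlier in this
// file.
//
//	client := entityresolution.NewFromConfig(cfg, func(o *entityresolution.Options) {
//		o.EndpointResolver = entityresolution.EndpointResolverFromURL(
//			"http://localhost:8080",
//			func(e *aws.Endpoint) {
//				// functional option: pin the signing region instead of
//				// defaulting to the client region
//				e.SigningRegion = "us-east-1"
//			},
//		)
//	})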
+func EndpointResolverFromURL(url string, optFns ...func(*aws.Endpoint)) EndpointResolver { + e := aws.Endpoint{URL: url, Source: aws.EndpointSourceCustom} + for _, fn := range optFns { + fn(&e) + } + + return EndpointResolverFunc( + func(region string, options EndpointResolverOptions) (aws.Endpoint, error) { + if len(e.SigningRegion) == 0 { + e.SigningRegion = region + } + return e, nil + }, + ) +} + +type ResolveEndpoint struct { + Resolver EndpointResolver + Options EndpointResolverOptions +} + +func (*ResolveEndpoint) ID() string { + return "ResolveEndpoint" +} + +func (m *ResolveEndpoint) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + if m.Resolver == nil { + return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") + } + + eo := m.Options + eo.Logger = middleware.GetLogger(ctx) + + var endpoint aws.Endpoint + endpoint, err = m.Resolver.ResolveEndpoint(awsmiddleware.GetRegion(ctx), eo) + if err != nil { + return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) + } + + req.URL, err = url.Parse(endpoint.URL) + if err != nil { + return out, metadata, fmt.Errorf("failed to parse endpoint URL: %w", err) + } + + if len(awsmiddleware.GetSigningName(ctx)) == 0 { + signingName := endpoint.SigningName + if len(signingName) == 0 { + signingName = "entityresolution" + } + ctx = awsmiddleware.SetSigningName(ctx, signingName) + } + ctx = awsmiddleware.SetEndpointSource(ctx, endpoint.Source) + ctx = smithyhttp.SetHostnameImmutable(ctx, endpoint.HostnameImmutable) + ctx = awsmiddleware.SetSigningRegion(ctx, endpoint.SigningRegion) + ctx = awsmiddleware.SetPartitionID(ctx, endpoint.PartitionID) + return next.HandleSerialize(ctx, in) +} +func addResolveEndpointMiddleware(stack *middleware.Stack, o Options) error { + return stack.Serialize.Insert(&ResolveEndpoint{ + Resolver: o.EndpointResolver, + Options: o.EndpointOptions, + }, "OperationSerializer", middleware.Before) +} + +func removeResolveEndpointMiddleware(stack *middleware.Stack) error { + _, err := stack.Serialize.Remove((&ResolveEndpoint{}).ID()) + return err +} + +type wrappedEndpointResolver struct { + awsResolver aws.EndpointResolverWithOptions + resolver EndpointResolver +} + +func (w *wrappedEndpointResolver) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) { + if w.awsResolver == nil { + goto fallback + } + endpoint, err = w.awsResolver.ResolveEndpoint(ServiceID, region, options) + if err == nil { + return endpoint, nil + } + + if nf := (&aws.EndpointNotFoundError{}); !errors.As(err, &nf) { + return endpoint, err + } + +fallback: + if w.resolver == nil { + return endpoint, fmt.Errorf("default endpoint resolver provided was nil") + } + return w.resolver.ResolveEndpoint(region, options) +} + +type awsEndpointResolverAdaptor func(service, region string) (aws.Endpoint, error) + +func (a awsEndpointResolverAdaptor) ResolveEndpoint(service, region string, options ...interface{}) (aws.Endpoint, error) { + return a(service, region) +} + +var _ aws.EndpointResolverWithOptions = awsEndpointResolverAdaptor(nil) + +// withEndpointResolver returns an EndpointResolver that first delegates endpoint resolution to the awsResolver. 
+// If awsResolver returns an aws.EndpointNotFoundError, the resolver will use the provided +// fallbackResolver for resolution. +// +// fallbackResolver must not be nil. +func withEndpointResolver(awsResolver aws.EndpointResolver, awsResolverWithOptions aws.EndpointResolverWithOptions, fallbackResolver EndpointResolver) EndpointResolver { + var resolver aws.EndpointResolverWithOptions + + if awsResolverWithOptions != nil { + resolver = awsResolverWithOptions + } else if awsResolver != nil { + resolver = awsEndpointResolverAdaptor(awsResolver.ResolveEndpoint) + } + + return &wrappedEndpointResolver{ + awsResolver: resolver, + resolver: fallbackResolver, + } +} + +func finalizeClientEndpointResolverOptions(options *Options) { + options.EndpointOptions.LogDeprecated = options.ClientLogMode.IsDeprecatedUsage() + + if len(options.EndpointOptions.ResolvedRegion) == 0 { + const fipsInfix = "-fips-" + const fipsPrefix = "fips-" + const fipsSuffix = "-fips" + + if strings.Contains(options.Region, fipsInfix) || + strings.Contains(options.Region, fipsPrefix) || + strings.Contains(options.Region, fipsSuffix) { + options.EndpointOptions.ResolvedRegion = strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll( + options.Region, fipsInfix, "-"), fipsPrefix, ""), fipsSuffix, "") + options.EndpointOptions.UseFIPSEndpoint = aws.FIPSEndpointStateEnabled + } + } + +} diff --git a/service/entityresolution/generated.json b/service/entityresolution/generated.json new file mode 100644 index 00000000000..3f79dad3deb --- /dev/null +++ b/service/entityresolution/generated.json @@ -0,0 +1,43 @@ +{ + "dependencies": { + "github.com/aws/aws-sdk-go-v2": "v1.4.0", + "github.com/aws/aws-sdk-go-v2/internal/configsources": "v0.0.0-00010101000000-000000000000", + "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2": "v2.0.0-00010101000000-000000000000", + "github.com/aws/smithy-go": "v1.4.0" + }, + "files": [ + "api_client.go", + "api_client_test.go", + "api_op_CreateMatchingWorkflow.go", + "api_op_CreateSchemaMapping.go", + "api_op_DeleteMatchingWorkflow.go", + "api_op_DeleteSchemaMapping.go", + "api_op_GetMatchId.go", + "api_op_GetMatchingJob.go", + "api_op_GetMatchingWorkflow.go", + "api_op_GetSchemaMapping.go", + "api_op_ListMatchingJobs.go", + "api_op_ListMatchingWorkflows.go", + "api_op_ListSchemaMappings.go", + "api_op_ListTagsForResource.go", + "api_op_StartMatchingJob.go", + "api_op_TagResource.go", + "api_op_UntagResource.go", + "api_op_UpdateMatchingWorkflow.go", + "deserializers.go", + "doc.go", + "endpoints.go", + "generated.json", + "internal/endpoints/endpoints.go", + "internal/endpoints/endpoints_test.go", + "protocol_test.go", + "serializers.go", + "types/enums.go", + "types/errors.go", + "types/types.go", + "validators.go" + ], + "go": "1.15", + "module": "github.com/aws/aws-sdk-go-v2/service/entityresolution", + "unstable": false +} diff --git a/service/entityresolution/go.mod b/service/entityresolution/go.mod new file mode 100644 index 00000000000..10cce3e7014 --- /dev/null +++ b/service/entityresolution/go.mod @@ -0,0 +1,16 @@ +module github.com/aws/aws-sdk-go-v2/service/entityresolution + +go 1.15 + +require ( + github.com/aws/aws-sdk-go-v2 v1.19.0 + github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.35 + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.29 + github.com/aws/smithy-go v1.13.5 +) + +replace github.com/aws/aws-sdk-go-v2 => ../../ + +replace github.com/aws/aws-sdk-go-v2/internal/configsources => ../../internal/configsources/ + +replace
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 => ../../internal/endpoints/v2/ diff --git a/service/entityresolution/go.sum b/service/entityresolution/go.sum new file mode 100644 index 00000000000..6f859610e44 --- /dev/null +++ b/service/entityresolution/go.sum @@ -0,0 +1,11 @@ +github.com/aws/smithy-go v1.13.5 h1:hgz0X/DX0dGqTYpGALqXJoRKRj5oQ7150i5FdTePzO8= +github.com/aws/smithy-go v1.13.5/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/service/entityresolution/go_module_metadata.go b/service/entityresolution/go_module_metadata.go new file mode 100644 index 00000000000..9ce4b2d4f8d --- /dev/null +++ b/service/entityresolution/go_module_metadata.go @@ -0,0 +1,6 @@ +// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. + +package entityresolution + +// goModuleVersion is the tagged release for this module +const goModuleVersion = "tip" diff --git a/service/entityresolution/internal/endpoints/endpoints.go b/service/entityresolution/internal/endpoints/endpoints.go new file mode 100644 index 00000000000..02fe4db8f92 --- /dev/null +++ b/service/entityresolution/internal/endpoints/endpoints.go @@ -0,0 +1,296 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package endpoints + +import ( + "github.com/aws/aws-sdk-go-v2/aws" + endpoints "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2" + "github.com/aws/smithy-go/logging" + "regexp" +) + +// Options is the endpoint resolver configuration options +type Options struct { + // Logger is a logging implementation that log events should be sent to. + Logger logging.Logger + + // LogDeprecated indicates that deprecated endpoints should be logged to the + // provided logger. + LogDeprecated bool + + // ResolvedRegion is used to override the region to be resolved, rather than + // using the value passed to the ResolveEndpoint method. This value is used by the + // SDK to translate regions like fips-us-east-1 or us-east-1-fips to an alternative + // name. You must not set this value directly in your application. + ResolvedRegion string + + // DisableHTTPS informs the resolver to return an endpoint that does not use the + // HTTPS scheme. + DisableHTTPS bool + + // UseDualStackEndpoint specifies the resolver must resolve a dual-stack endpoint. + UseDualStackEndpoint aws.DualStackEndpointState + + // UseFIPSEndpoint specifies the resolver must resolve a FIPS endpoint.
+ UseFIPSEndpoint aws.FIPSEndpointState +} + +func (o Options) GetResolvedRegion() string { + return o.ResolvedRegion +} + +func (o Options) GetDisableHTTPS() bool { + return o.DisableHTTPS +} + +func (o Options) GetUseDualStackEndpoint() aws.DualStackEndpointState { + return o.UseDualStackEndpoint +} + +func (o Options) GetUseFIPSEndpoint() aws.FIPSEndpointState { + return o.UseFIPSEndpoint +} + +func transformToSharedOptions(options Options) endpoints.Options { + return endpoints.Options{ + Logger: options.Logger, + LogDeprecated: options.LogDeprecated, + ResolvedRegion: options.ResolvedRegion, + DisableHTTPS: options.DisableHTTPS, + UseDualStackEndpoint: options.UseDualStackEndpoint, + UseFIPSEndpoint: options.UseFIPSEndpoint, + } +} + +// Resolver EntityResolution endpoint resolver +type Resolver struct { + partitions endpoints.Partitions +} + +// ResolveEndpoint resolves the service endpoint for the given region and options +func (r *Resolver) ResolveEndpoint(region string, options Options) (endpoint aws.Endpoint, err error) { + if len(region) == 0 { + return endpoint, &aws.MissingRegionError{} + } + + opt := transformToSharedOptions(options) + return r.partitions.ResolveEndpoint(region, opt) +} + +// New returns a new Resolver +func New() *Resolver { + return &Resolver{ + partitions: defaultPartitions, + } +} + +var partitionRegexp = struct { + Aws *regexp.Regexp + AwsCn *regexp.Regexp + AwsIso *regexp.Regexp + AwsIsoB *regexp.Regexp + AwsIsoE *regexp.Regexp + AwsIsoF *regexp.Regexp + AwsUsGov *regexp.Regexp +}{ + + Aws: regexp.MustCompile("^(us|eu|ap|sa|ca|me|af)\\-\\w+\\-\\d+$"), + AwsCn: regexp.MustCompile("^cn\\-\\w+\\-\\d+$"), + AwsIso: regexp.MustCompile("^us\\-iso\\-\\w+\\-\\d+$"), + AwsIsoB: regexp.MustCompile("^us\\-isob\\-\\w+\\-\\d+$"), + AwsIsoE: regexp.MustCompile("^eu\\-isoe\\-\\w+\\-\\d+$"), + AwsIsoF: regexp.MustCompile("^us\\-isof\\-\\w+\\-\\d+$"), + AwsUsGov: regexp.MustCompile("^us\\-gov\\-\\w+\\-\\d+$"), +} + +var defaultPartitions = endpoints.Partitions{ + { + ID: "aws", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.DualStackVariant, + }: { + Hostname: "entityresolution.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "entityresolution-fips.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "entityresolution-fips.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "entityresolution.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.Aws, + IsRegionalized: true, + }, + { + ID: "aws-cn", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.DualStackVariant, + }: { + Hostname: "entityresolution.{region}.api.amazonwebservices.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "entityresolution-fips.{region}.amazonaws.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "entityresolution-fips.{region}.api.amazonwebservices.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + 
Variant: 0, + }: { + Hostname: "entityresolution.{region}.amazonaws.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsCn, + IsRegionalized: true, + }, + { + ID: "aws-iso", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "entityresolution-fips.{region}.c2s.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "entityresolution.{region}.c2s.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIso, + IsRegionalized: true, + }, + { + ID: "aws-iso-b", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "entityresolution-fips.{region}.sc2s.sgov.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "entityresolution.{region}.sc2s.sgov.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIsoB, + IsRegionalized: true, + }, + { + ID: "aws-iso-e", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "entityresolution-fips.{region}.cloud.adc-e.uk", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "entityresolution.{region}.cloud.adc-e.uk", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIsoE, + IsRegionalized: true, + }, + { + ID: "aws-iso-f", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "entityresolution-fips.{region}.csp.hci.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "entityresolution.{region}.csp.hci.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIsoF, + IsRegionalized: true, + }, + { + ID: "aws-us-gov", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.DualStackVariant, + }: { + Hostname: "entityresolution.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "entityresolution-fips.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "entityresolution-fips.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "entityresolution.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsUsGov, + IsRegionalized: true, + }, +} diff --git a/service/entityresolution/internal/endpoints/endpoints_test.go b/service/entityresolution/internal/endpoints/endpoints_test.go new file mode 100644 index 00000000000..08e5da2d833 --- /dev/null +++ b/service/entityresolution/internal/endpoints/endpoints_test.go @@ -0,0 +1,11 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
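[Editorial aside, not part of the diff: the partition table above drives hostname construction, and it is exercised through the exported resolver from endpoints.go. A minimal sketch, assuming only the identifiers visible in this change, resolves the FIPS variant for a region in the aws partition:]

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/entityresolution"
)

func main() {
	resolver := entityresolution.NewDefaultEndpointResolver()

	// Request the FIPS variant; per the aws partition defaults above this
	// selects the "entityresolution-fips.{region}.amazonaws.com" template.
	// (Pseudo-regions like "us-east-1-fips" are normalized to a real region
	// plus UseFIPSEndpoint by finalizeClientEndpointResolverOptions.)
	ep, err := resolver.ResolveEndpoint("us-east-1", entityresolution.EndpointResolverOptions{
		UseFIPSEndpoint: aws.FIPSEndpointStateEnabled,
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(ep.URL) // expected: https://entityresolution-fips.us-east-1.amazonaws.com
}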
+ +package endpoints + +import ( + "testing" +) + +func TestRegexCompile(t *testing.T) { + _ = defaultPartitions +} diff --git a/service/entityresolution/protocol_test.go b/service/entityresolution/protocol_test.go new file mode 100644 index 00000000000..8f8a6cbf767 --- /dev/null +++ b/service/entityresolution/protocol_test.go @@ -0,0 +1,3 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package entityresolution diff --git a/service/entityresolution/serializers.go b/service/entityresolution/serializers.go new file mode 100644 index 00000000000..19a42b5b26a --- /dev/null +++ b/service/entityresolution/serializers.go @@ -0,0 +1,1416 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package entityresolution + +import ( + "bytes" + "context" + "fmt" + "github.com/aws/aws-sdk-go-v2/service/entityresolution/types" + smithy "github.com/aws/smithy-go" + "github.com/aws/smithy-go/encoding/httpbinding" + smithyjson "github.com/aws/smithy-go/encoding/json" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +type awsRestjson1_serializeOpCreateMatchingWorkflow struct { +} + +func (*awsRestjson1_serializeOpCreateMatchingWorkflow) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpCreateMatchingWorkflow) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*CreateMatchingWorkflowInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/matchingworkflows") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "POST" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + restEncoder.SetHeader("Content-Type").String("application/json") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsRestjson1_serializeOpDocumentCreateMatchingWorkflowInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsCreateMatchingWorkflowInput(v *CreateMatchingWorkflowInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + return nil +} + +func awsRestjson1_serializeOpDocumentCreateMatchingWorkflowInput(v *CreateMatchingWorkflowInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Description != nil { + ok := object.Key("description") + ok.String(*v.Description) + } + + if v.IncrementalRunConfig != nil { + ok := 
object.Key("incrementalRunConfig") + if err := awsRestjson1_serializeDocumentIncrementalRunConfig(v.IncrementalRunConfig, ok); err != nil { + return err + } + } + + if v.InputSourceConfig != nil { + ok := object.Key("inputSourceConfig") + if err := awsRestjson1_serializeDocumentInputSourceConfig(v.InputSourceConfig, ok); err != nil { + return err + } + } + + if v.OutputSourceConfig != nil { + ok := object.Key("outputSourceConfig") + if err := awsRestjson1_serializeDocumentOutputSourceConfig(v.OutputSourceConfig, ok); err != nil { + return err + } + } + + if v.ResolutionTechniques != nil { + ok := object.Key("resolutionTechniques") + if err := awsRestjson1_serializeDocumentResolutionTechniques(v.ResolutionTechniques, ok); err != nil { + return err + } + } + + if v.RoleArn != nil { + ok := object.Key("roleArn") + ok.String(*v.RoleArn) + } + + if v.Tags != nil { + ok := object.Key("tags") + if err := awsRestjson1_serializeDocumentTagMap(v.Tags, ok); err != nil { + return err + } + } + + if v.WorkflowName != nil { + ok := object.Key("workflowName") + ok.String(*v.WorkflowName) + } + + return nil +} + +type awsRestjson1_serializeOpCreateSchemaMapping struct { +} + +func (*awsRestjson1_serializeOpCreateSchemaMapping) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpCreateSchemaMapping) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*CreateSchemaMappingInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/schemas") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "POST" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + restEncoder.SetHeader("Content-Type").String("application/json") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsRestjson1_serializeOpDocumentCreateSchemaMappingInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsCreateSchemaMappingInput(v *CreateSchemaMappingInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + return nil +} + +func awsRestjson1_serializeOpDocumentCreateSchemaMappingInput(v *CreateSchemaMappingInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Description != nil { + ok := object.Key("description") + ok.String(*v.Description) + } + + if v.MappedInputFields != nil { + ok := object.Key("mappedInputFields") + if 
err := awsRestjson1_serializeDocumentSchemaInputAttributes(v.MappedInputFields, ok); err != nil { + return err + } + } + + if v.SchemaName != nil { + ok := object.Key("schemaName") + ok.String(*v.SchemaName) + } + + if v.Tags != nil { + ok := object.Key("tags") + if err := awsRestjson1_serializeDocumentTagMap(v.Tags, ok); err != nil { + return err + } + } + + return nil +} + +type awsRestjson1_serializeOpDeleteMatchingWorkflow struct { +} + +func (*awsRestjson1_serializeOpDeleteMatchingWorkflow) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpDeleteMatchingWorkflow) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeleteMatchingWorkflowInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/matchingworkflows/{workflowName}") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "DELETE" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestjson1_serializeOpHttpBindingsDeleteMatchingWorkflowInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsDeleteMatchingWorkflowInput(v *DeleteMatchingWorkflowInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.WorkflowName == nil || len(*v.WorkflowName) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member workflowName must not be empty")} + } + if v.WorkflowName != nil { + if err := encoder.SetURI("workflowName").String(*v.WorkflowName); err != nil { + return err + } + } + + return nil +} + +type awsRestjson1_serializeOpDeleteSchemaMapping struct { +} + +func (*awsRestjson1_serializeOpDeleteSchemaMapping) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpDeleteSchemaMapping) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeleteSchemaMappingInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/schemas/{schemaName}") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + 
request.Method = "DELETE" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestjson1_serializeOpHttpBindingsDeleteSchemaMappingInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsDeleteSchemaMappingInput(v *DeleteSchemaMappingInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.SchemaName == nil || len(*v.SchemaName) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member schemaName must not be empty")} + } + if v.SchemaName != nil { + if err := encoder.SetURI("schemaName").String(*v.SchemaName); err != nil { + return err + } + } + + return nil +} + +type awsRestjson1_serializeOpGetMatchId struct { +} + +func (*awsRestjson1_serializeOpGetMatchId) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpGetMatchId) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetMatchIdInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/matchingworkflows/{workflowName}/matches") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "POST" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestjson1_serializeOpHttpBindingsGetMatchIdInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + restEncoder.SetHeader("Content-Type").String("application/json") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsRestjson1_serializeOpDocumentGetMatchIdInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsGetMatchIdInput(v *GetMatchIdInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.WorkflowName == nil || len(*v.WorkflowName) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member workflowName must not be empty")} + } + if v.WorkflowName != nil { + if err := 
encoder.SetURI("workflowName").String(*v.WorkflowName); err != nil { + return err + } + } + + return nil +} + +func awsRestjson1_serializeOpDocumentGetMatchIdInput(v *GetMatchIdInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Record != nil { + ok := object.Key("record") + if err := awsRestjson1_serializeDocumentRecordAttributeMap(v.Record, ok); err != nil { + return err + } + } + + return nil +} + +type awsRestjson1_serializeOpGetMatchingJob struct { +} + +func (*awsRestjson1_serializeOpGetMatchingJob) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpGetMatchingJob) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetMatchingJobInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/matchingworkflows/{workflowName}/jobs/{jobId}") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestjson1_serializeOpHttpBindingsGetMatchingJobInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsGetMatchingJobInput(v *GetMatchingJobInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.JobId == nil || len(*v.JobId) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member jobId must not be empty")} + } + if v.JobId != nil { + if err := encoder.SetURI("jobId").String(*v.JobId); err != nil { + return err + } + } + + if v.WorkflowName == nil || len(*v.WorkflowName) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member workflowName must not be empty")} + } + if v.WorkflowName != nil { + if err := encoder.SetURI("workflowName").String(*v.WorkflowName); err != nil { + return err + } + } + + return nil +} + +type awsRestjson1_serializeOpGetMatchingWorkflow struct { +} + +func (*awsRestjson1_serializeOpGetMatchingWorkflow) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpGetMatchingWorkflow) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetMatchingWorkflowInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: 
fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/matchingworkflows/{workflowName}") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestjson1_serializeOpHttpBindingsGetMatchingWorkflowInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsGetMatchingWorkflowInput(v *GetMatchingWorkflowInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.WorkflowName == nil || len(*v.WorkflowName) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member workflowName must not be empty")} + } + if v.WorkflowName != nil { + if err := encoder.SetURI("workflowName").String(*v.WorkflowName); err != nil { + return err + } + } + + return nil +} + +type awsRestjson1_serializeOpGetSchemaMapping struct { +} + +func (*awsRestjson1_serializeOpGetSchemaMapping) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpGetSchemaMapping) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetSchemaMappingInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/schemas/{schemaName}") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestjson1_serializeOpHttpBindingsGetSchemaMappingInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsGetSchemaMappingInput(v *GetSchemaMappingInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.SchemaName == nil || len(*v.SchemaName) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member schemaName must not be empty")} + } + if v.SchemaName != nil { + if err := encoder.SetURI("schemaName").String(*v.SchemaName); err != nil { + return err + } + } + + return nil +} + +type 
awsRestjson1_serializeOpListMatchingJobs struct { +} + +func (*awsRestjson1_serializeOpListMatchingJobs) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpListMatchingJobs) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListMatchingJobsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/matchingworkflows/{workflowName}/jobs") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestjson1_serializeOpHttpBindingsListMatchingJobsInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsListMatchingJobsInput(v *ListMatchingJobsInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.MaxResults != nil { + encoder.SetQuery("maxResults").Integer(*v.MaxResults) + } + + if v.NextToken != nil { + encoder.SetQuery("nextToken").String(*v.NextToken) + } + + if v.WorkflowName == nil || len(*v.WorkflowName) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member workflowName must not be empty")} + } + if v.WorkflowName != nil { + if err := encoder.SetURI("workflowName").String(*v.WorkflowName); err != nil { + return err + } + } + + return nil +} + +type awsRestjson1_serializeOpListMatchingWorkflows struct { +} + +func (*awsRestjson1_serializeOpListMatchingWorkflows) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpListMatchingWorkflows) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListMatchingWorkflowsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/matchingworkflows") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := 
awsRestjson1_serializeOpHttpBindingsListMatchingWorkflowsInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsListMatchingWorkflowsInput(v *ListMatchingWorkflowsInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.MaxResults != nil { + encoder.SetQuery("maxResults").Integer(*v.MaxResults) + } + + if v.NextToken != nil { + encoder.SetQuery("nextToken").String(*v.NextToken) + } + + return nil +} + +type awsRestjson1_serializeOpListSchemaMappings struct { +} + +func (*awsRestjson1_serializeOpListSchemaMappings) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpListSchemaMappings) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListSchemaMappingsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/schemas") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestjson1_serializeOpHttpBindingsListSchemaMappingsInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsListSchemaMappingsInput(v *ListSchemaMappingsInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.MaxResults != nil { + encoder.SetQuery("maxResults").Integer(*v.MaxResults) + } + + if v.NextToken != nil { + encoder.SetQuery("nextToken").String(*v.NextToken) + } + + return nil +} + +type awsRestjson1_serializeOpListTagsForResource struct { +} + +func (*awsRestjson1_serializeOpListTagsForResource) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpListTagsForResource) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListTagsForResourceInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + 
} + + opPath, opQuery := httpbinding.SplitURI("/tags/{resourceArn}") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestjson1_serializeOpHttpBindingsListTagsForResourceInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsListTagsForResourceInput(v *ListTagsForResourceInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.ResourceArn == nil || len(*v.ResourceArn) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member resourceArn must not be empty")} + } + if v.ResourceArn != nil { + if err := encoder.SetURI("resourceArn").String(*v.ResourceArn); err != nil { + return err + } + } + + return nil +} + +type awsRestjson1_serializeOpStartMatchingJob struct { +} + +func (*awsRestjson1_serializeOpStartMatchingJob) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpStartMatchingJob) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*StartMatchingJobInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/matchingworkflows/{workflowName}/jobs") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "POST" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestjson1_serializeOpHttpBindingsStartMatchingJobInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsStartMatchingJobInput(v *StartMatchingJobInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.WorkflowName == nil || len(*v.WorkflowName) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member workflowName must not be empty")} + } + if v.WorkflowName != nil { + if err := encoder.SetURI("workflowName").String(*v.WorkflowName); err != nil { + return err + } + } + + return nil +} + +type awsRestjson1_serializeOpTagResource struct { +} + +func 
(*awsRestjson1_serializeOpTagResource) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpTagResource) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*TagResourceInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/tags/{resourceArn}") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "POST" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestjson1_serializeOpHttpBindingsTagResourceInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + restEncoder.SetHeader("Content-Type").String("application/json") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsRestjson1_serializeOpDocumentTagResourceInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsTagResourceInput(v *TagResourceInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.ResourceArn == nil || len(*v.ResourceArn) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member resourceArn must not be empty")} + } + if v.ResourceArn != nil { + if err := encoder.SetURI("resourceArn").String(*v.ResourceArn); err != nil { + return err + } + } + + return nil +} + +func awsRestjson1_serializeOpDocumentTagResourceInput(v *TagResourceInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Tags != nil { + ok := object.Key("tags") + if err := awsRestjson1_serializeDocumentTagMap(v.Tags, ok); err != nil { + return err + } + } + + return nil +} + +type awsRestjson1_serializeOpUntagResource struct { +} + +func (*awsRestjson1_serializeOpUntagResource) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpUntagResource) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*UntagResourceInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := 
httpbinding.SplitURI("/tags/{resourceArn}") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "DELETE" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestjson1_serializeOpHttpBindingsUntagResourceInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsUntagResourceInput(v *UntagResourceInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.ResourceArn == nil || len(*v.ResourceArn) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member resourceArn must not be empty")} + } + if v.ResourceArn != nil { + if err := encoder.SetURI("resourceArn").String(*v.ResourceArn); err != nil { + return err + } + } + + if v.TagKeys != nil { + for i := range v.TagKeys { + encoder.AddQuery("tagKeys").String(v.TagKeys[i]) + } + } + + return nil +} + +type awsRestjson1_serializeOpUpdateMatchingWorkflow struct { +} + +func (*awsRestjson1_serializeOpUpdateMatchingWorkflow) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpUpdateMatchingWorkflow) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*UpdateMatchingWorkflowInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/matchingworkflows/{workflowName}") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "PUT" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestjson1_serializeOpHttpBindingsUpdateMatchingWorkflowInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + restEncoder.SetHeader("Content-Type").String("application/json") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsRestjson1_serializeOpDocumentUpdateMatchingWorkflowInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsUpdateMatchingWorkflowInput(v 
*UpdateMatchingWorkflowInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.WorkflowName == nil || len(*v.WorkflowName) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member workflowName must not be empty")} + } + if v.WorkflowName != nil { + if err := encoder.SetURI("workflowName").String(*v.WorkflowName); err != nil { + return err + } + } + + return nil +} + +func awsRestjson1_serializeOpDocumentUpdateMatchingWorkflowInput(v *UpdateMatchingWorkflowInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Description != nil { + ok := object.Key("description") + ok.String(*v.Description) + } + + if v.IncrementalRunConfig != nil { + ok := object.Key("incrementalRunConfig") + if err := awsRestjson1_serializeDocumentIncrementalRunConfig(v.IncrementalRunConfig, ok); err != nil { + return err + } + } + + if v.InputSourceConfig != nil { + ok := object.Key("inputSourceConfig") + if err := awsRestjson1_serializeDocumentInputSourceConfig(v.InputSourceConfig, ok); err != nil { + return err + } + } + + if v.OutputSourceConfig != nil { + ok := object.Key("outputSourceConfig") + if err := awsRestjson1_serializeDocumentOutputSourceConfig(v.OutputSourceConfig, ok); err != nil { + return err + } + } + + if v.ResolutionTechniques != nil { + ok := object.Key("resolutionTechniques") + if err := awsRestjson1_serializeDocumentResolutionTechniques(v.ResolutionTechniques, ok); err != nil { + return err + } + } + + if v.RoleArn != nil { + ok := object.Key("roleArn") + ok.String(*v.RoleArn) + } + + return nil +} + +func awsRestjson1_serializeDocumentIncrementalRunConfig(v *types.IncrementalRunConfig, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if len(v.IncrementalRunType) > 0 { + ok := object.Key("incrementalRunType") + ok.String(string(v.IncrementalRunType)) + } + + return nil +} + +func awsRestjson1_serializeDocumentInputSource(v *types.InputSource, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ApplyNormalization != nil { + ok := object.Key("applyNormalization") + ok.Boolean(*v.ApplyNormalization) + } + + if v.InputSourceARN != nil { + ok := object.Key("inputSourceARN") + ok.String(*v.InputSourceARN) + } + + if v.SchemaName != nil { + ok := object.Key("schemaName") + ok.String(*v.SchemaName) + } + + return nil +} + +func awsRestjson1_serializeDocumentInputSourceConfig(v []types.InputSource, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsRestjson1_serializeDocumentInputSource(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsRestjson1_serializeDocumentMatchingKeys(v []string, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + av.String(v[i]) + } + return nil +} + +func awsRestjson1_serializeDocumentOutputAttribute(v *types.OutputAttribute, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Hashed != nil { + ok := object.Key("hashed") + ok.Boolean(*v.Hashed) + } + + if v.Name != nil { + ok := object.Key("name") + ok.String(*v.Name) + } + + return nil +} + +func awsRestjson1_serializeDocumentOutputAttributes(v []types.OutputAttribute, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err 
:= awsRestjson1_serializeDocumentOutputAttribute(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsRestjson1_serializeDocumentOutputSource(v *types.OutputSource, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ApplyNormalization != nil { + ok := object.Key("applyNormalization") + ok.Boolean(*v.ApplyNormalization) + } + + if v.KMSArn != nil { + ok := object.Key("KMSArn") + ok.String(*v.KMSArn) + } + + if v.Output != nil { + ok := object.Key("output") + if err := awsRestjson1_serializeDocumentOutputAttributes(v.Output, ok); err != nil { + return err + } + } + + if v.OutputS3Path != nil { + ok := object.Key("outputS3Path") + ok.String(*v.OutputS3Path) + } + + return nil +} + +func awsRestjson1_serializeDocumentOutputSourceConfig(v []types.OutputSource, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsRestjson1_serializeDocumentOutputSource(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsRestjson1_serializeDocumentRecordAttributeMap(v map[string]string, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + for key := range v { + om := object.Key(key) + om.String(v[key]) + } + return nil +} + +func awsRestjson1_serializeDocumentResolutionTechniques(v *types.ResolutionTechniques, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if len(v.ResolutionType) > 0 { + ok := object.Key("resolutionType") + ok.String(string(v.ResolutionType)) + } + + if v.RuleBasedProperties != nil { + ok := object.Key("ruleBasedProperties") + if err := awsRestjson1_serializeDocumentRuleBasedProperties(v.RuleBasedProperties, ok); err != nil { + return err + } + } + + return nil +} + +func awsRestjson1_serializeDocumentRule(v *types.Rule, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.MatchingKeys != nil { + ok := object.Key("matchingKeys") + if err := awsRestjson1_serializeDocumentMatchingKeys(v.MatchingKeys, ok); err != nil { + return err + } + } + + if v.RuleName != nil { + ok := object.Key("ruleName") + ok.String(*v.RuleName) + } + + return nil +} + +func awsRestjson1_serializeDocumentRuleBasedProperties(v *types.RuleBasedProperties, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if len(v.AttributeMatchingModel) > 0 { + ok := object.Key("attributeMatchingModel") + ok.String(string(v.AttributeMatchingModel)) + } + + if v.Rules != nil { + ok := object.Key("rules") + if err := awsRestjson1_serializeDocumentRuleList(v.Rules, ok); err != nil { + return err + } + } + + return nil +} + +func awsRestjson1_serializeDocumentRuleList(v []types.Rule, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsRestjson1_serializeDocumentRule(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsRestjson1_serializeDocumentSchemaInputAttribute(v *types.SchemaInputAttribute, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.FieldName != nil { + ok := object.Key("fieldName") + ok.String(*v.FieldName) + } + + if v.GroupName != nil { + ok := object.Key("groupName") + ok.String(*v.GroupName) + } + + if v.MatchKey != nil { + ok := object.Key("matchKey") + ok.String(*v.MatchKey) + } + + if len(v.Type) > 0 { + ok := object.Key("type") + ok.String(string(v.Type)) + } + + return 
nil +} + +func awsRestjson1_serializeDocumentSchemaInputAttributes(v []types.SchemaInputAttribute, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsRestjson1_serializeDocumentSchemaInputAttribute(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsRestjson1_serializeDocumentTagMap(v map[string]string, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + for key := range v { + om := object.Key(key) + om.String(v[key]) + } + return nil +} diff --git a/service/entityresolution/types/enums.go b/service/entityresolution/types/enums.go new file mode 100644 index 00000000000..49f96f3258d --- /dev/null +++ b/service/entityresolution/types/enums.go @@ -0,0 +1,129 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package types + +type AttributeMatchingModel string + +// Enum values for AttributeMatchingModel +const ( + AttributeMatchingModelOneToOne AttributeMatchingModel = "ONE_TO_ONE" + AttributeMatchingModelManyToMany AttributeMatchingModel = "MANY_TO_MANY" +) + +// Values returns all known values for AttributeMatchingModel. Note that this can +// be expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (AttributeMatchingModel) Values() []AttributeMatchingModel { + return []AttributeMatchingModel{ + "ONE_TO_ONE", + "MANY_TO_MANY", + } +} + +type IncrementalRunType string + +// Enum values for IncrementalRunType +const ( + IncrementalRunTypeImmediate IncrementalRunType = "IMMEDIATE" +) + +// Values returns all known values for IncrementalRunType. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (IncrementalRunType) Values() []IncrementalRunType { + return []IncrementalRunType{ + "IMMEDIATE", + } +} + +type JobStatus string + +// Enum values for JobStatus +const ( + JobStatusRunning JobStatus = "RUNNING" + JobStatusSucceeded JobStatus = "SUCCEEDED" + JobStatusFailed JobStatus = "FAILED" + JobStatusQueued JobStatus = "QUEUED" +) + +// Values returns all known values for JobStatus. Note that this can be expanded +// in the future, and so it is only as up to date as the client. The ordering of +// this slice is not guaranteed to be stable across updates. +func (JobStatus) Values() []JobStatus { + return []JobStatus{ + "RUNNING", + "SUCCEEDED", + "FAILED", + "QUEUED", + } +} + +type ResolutionType string + +// Enum values for ResolutionType +const ( + ResolutionTypeRuleMatching ResolutionType = "RULE_MATCHING" + ResolutionTypeMlMatching ResolutionType = "ML_MATCHING" +) + +// Values returns all known values for ResolutionType. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. 
+func (ResolutionType) Values() []ResolutionType { + return []ResolutionType{ + "RULE_MATCHING", + "ML_MATCHING", + } +} + +type SchemaAttributeType string + +// Enum values for SchemaAttributeType +const ( + SchemaAttributeTypeName SchemaAttributeType = "NAME" + SchemaAttributeTypeNameFirst SchemaAttributeType = "NAME_FIRST" + SchemaAttributeTypeNameMiddle SchemaAttributeType = "NAME_MIDDLE" + SchemaAttributeTypeNameLast SchemaAttributeType = "NAME_LAST" + SchemaAttributeTypeAddress SchemaAttributeType = "ADDRESS" + SchemaAttributeTypeAddressStreet1 SchemaAttributeType = "ADDRESS_STREET1" + SchemaAttributeTypeAddressStreet2 SchemaAttributeType = "ADDRESS_STREET2" + SchemaAttributeTypeAddressStreet3 SchemaAttributeType = "ADDRESS_STREET3" + SchemaAttributeTypeAddressCity SchemaAttributeType = "ADDRESS_CITY" + SchemaAttributeTypeAddressState SchemaAttributeType = "ADDRESS_STATE" + SchemaAttributeTypeAddressCountry SchemaAttributeType = "ADDRESS_COUNTRY" + SchemaAttributeTypeAddressPostalcode SchemaAttributeType = "ADDRESS_POSTALCODE" + SchemaAttributeTypePhone SchemaAttributeType = "PHONE" + SchemaAttributeTypePhoneNumber SchemaAttributeType = "PHONE_NUMBER" + SchemaAttributeTypePhoneCountrycode SchemaAttributeType = "PHONE_COUNTRYCODE" + SchemaAttributeTypeEmailAddress SchemaAttributeType = "EMAIL_ADDRESS" + SchemaAttributeTypeUniqueId SchemaAttributeType = "UNIQUE_ID" + SchemaAttributeTypeDate SchemaAttributeType = "DATE" + SchemaAttributeTypeString SchemaAttributeType = "STRING" +) + +// Values returns all known values for SchemaAttributeType. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (SchemaAttributeType) Values() []SchemaAttributeType { + return []SchemaAttributeType{ + "NAME", + "NAME_FIRST", + "NAME_MIDDLE", + "NAME_LAST", + "ADDRESS", + "ADDRESS_STREET1", + "ADDRESS_STREET2", + "ADDRESS_STREET3", + "ADDRESS_CITY", + "ADDRESS_STATE", + "ADDRESS_COUNTRY", + "ADDRESS_POSTALCODE", + "PHONE", + "PHONE_NUMBER", + "PHONE_COUNTRYCODE", + "EMAIL_ADDRESS", + "UNIQUE_ID", + "DATE", + "STRING", + } +} diff --git a/service/entityresolution/types/errors.go b/service/entityresolution/types/errors.go new file mode 100644 index 00000000000..28befab7c5d --- /dev/null +++ b/service/entityresolution/types/errors.go @@ -0,0 +1,196 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package types + +import ( + "fmt" + smithy "github.com/aws/smithy-go" +) + +// You do not have sufficient access to perform this action. HTTP Status Code: 403 +type AccessDeniedException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *AccessDeniedException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *AccessDeniedException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *AccessDeniedException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "AccessDeniedException" + } + return *e.ErrorCodeOverride +} +func (e *AccessDeniedException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request could not be processed because of conflict in the current state of +// the resource. Example: Workflow already exists, Schema already exists, Workflow +// is currently running, etc. 
HTTP Status Code: 400 +type ConflictException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *ConflictException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ConflictException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *ConflictException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "ConflictException" + } + return *e.ErrorCodeOverride +} +func (e *ConflictException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request was rejected because it attempted to create resources beyond the +// current AWS Entity Resolution account limits. The error message describes the +// limit exceeded. HTTP Status Code: 402 +type ExceedsLimitException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *ExceedsLimitException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ExceedsLimitException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *ExceedsLimitException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "ExceedsLimitException" + } + return *e.ErrorCodeOverride +} +func (e *ExceedsLimitException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// This exception occurs when there is an internal failure in the AWS Entity +// Resolution service. HTTP Status Code: 500 +type InternalServerException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *InternalServerException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InternalServerException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InternalServerException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "InternalServerException" + } + return *e.ErrorCodeOverride +} +func (e *InternalServerException) ErrorFault() smithy.ErrorFault { return smithy.FaultServer } + +// The resource could not be found. HTTP Status Code: 404 +type ResourceNotFoundException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *ResourceNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ResourceNotFoundException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *ResourceNotFoundException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "ResourceNotFoundException" + } + return *e.ErrorCodeOverride +} +func (e *ResourceNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request was denied due to request throttling. 
HTTP Status Code: 429 +type ThrottlingException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *ThrottlingException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ThrottlingException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *ThrottlingException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "ThrottlingException" + } + return *e.ErrorCodeOverride +} +func (e *ThrottlingException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The input fails to satisfy the constraints specified by AWS Entity Resolution. +// HTTP Status Code: 400 +type ValidationException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *ValidationException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ValidationException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *ValidationException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "ValidationException" + } + return *e.ErrorCodeOverride +} +func (e *ValidationException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } diff --git a/service/entityresolution/types/types.go b/service/entityresolution/types/types.go new file mode 100644 index 00000000000..034daf8be8b --- /dev/null +++ b/service/entityresolution/types/types.go @@ -0,0 +1,282 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package types + +import ( + smithydocument "github.com/aws/smithy-go/document" + "time" +) + +// An object containing an error message, if there was an error. +type ErrorDetails struct { + + // The error message from the job, if there is one. + ErrorMessage *string + + noSmithyDocumentSerde +} + +// An object which defines an incremental run type and has only incrementalRunType +// as a field. +type IncrementalRunConfig struct { + + // The type of incremental run. It takes only one value: IMMEDIATE . + IncrementalRunType IncrementalRunType + + noSmithyDocumentSerde +} + +// An object containing InputSourceARN , SchemaName , and ApplyNormalization . +type InputSource struct { + + // A Glue table ARN for the input source table. + // + // This member is required. + InputSourceARN *string + + // The name of the schema to be retrieved. + // + // This member is required. + SchemaName *string + + // Normalizes the attributes defined in the schema in the input data. For example, + // if an attribute has an AttributeType of PHONE_NUMBER , and the data in the input + // table is in a format of 1234567890, Entity Resolution will normalize this field + // in the output to (123)-456-7890. + ApplyNormalization *bool + + noSmithyDocumentSerde +} + +// An object containing InputRecords , TotalRecordsProcessed , MatchIDs , and +// RecordsNotProcessed . +type JobMetrics struct { + + // The total number of input records. + InputRecords *int32 + + // The total number of matchID s generated. + MatchIDs *int32 + + // The total number of records that did not get processed. + RecordsNotProcessed *int32 + + // The total number of records processed. + TotalRecordsProcessed *int32 + + noSmithyDocumentSerde +} + +// An object containing the JobId , Status , StartTime , and EndTime of a job. +type JobSummary struct { + + // The ID of the job. + // + // This member is required.
+ JobId *string + + // The time at which the job was started. + // + // This member is required. + StartTime *time.Time + + // The current status of the job. Either RUNNING , SUCCEEDED , QUEUED , or FAILED . + // + // This member is required. + Status JobStatus + + // The time at which the job finished. + EndTime *time.Time + + noSmithyDocumentSerde +} + +// An object containing the fields WorkflowName , WorkflowArn , CreatedAt , and +// UpdatedAt . +type MatchingWorkflowSummary struct { + + // The timestamp of when the workflow was created. + // + // This member is required. + CreatedAt *time.Time + + // The timestamp of when the workflow was last updated. + // + // This member is required. + UpdatedAt *time.Time + + // The ARN (Amazon Resource Name) that Entity Resolution generated for the + // MatchingWorkflow . + // + // This member is required. + WorkflowArn *string + + // The name of the workflow. + // + // This member is required. + WorkflowName *string + + noSmithyDocumentSerde +} + +// An object containing the fields Name and Hashed . Each such object selects a +// column to be included in the output table and indicates whether the values of +// the column should be hashed. +type OutputAttribute struct { + + // A name of a column to be written to the output. This must be an InputField name + // in the schema mapping. + // + // This member is required. + Name *string + + // Enables the ability to hash the column values in the output. + Hashed *bool + + noSmithyDocumentSerde +} + +// An object defining the output configuration for a matching workflow: the +// columns to write ( Output ), the destination ( OutputS3Path ), and optional +// normalization and encryption settings. +type OutputSource struct { + + // A list of OutputAttribute objects, each of which has the fields Name and + // Hashed. Each of these objects selects a column to be included in the output + // table, and whether the values of the column should be hashed. + // + // This member is required. + Output []OutputAttribute + + // The S3 path to which Entity Resolution will write the output table. + // + // This member is required. + OutputS3Path *string + + // Normalizes the attributes defined in the schema in the input data. For example, + // if an attribute has an AttributeType of PHONE_NUMBER , and the data in the input + // table is in a format of 1234567890, Entity Resolution will normalize this field + // in the output to (123)-456-7890. + ApplyNormalization *bool + + // Customer KMS ARN for encryption at rest. If not provided, the system will use + // an Entity Resolution managed KMS key. + KMSArn *string + + noSmithyDocumentSerde +} + +// An object which defines the resolutionType and the ruleBasedProperties . +type ResolutionTechniques struct { + + // There are two types of matching: RULE_MATCHING and ML_MATCHING . + ResolutionType ResolutionType + + // An object which defines the list of matching rules to run and has a field Rules + // , which is a list of rule objects. + RuleBasedProperties *RuleBasedProperties + + noSmithyDocumentSerde +} + +// An object containing RuleName and MatchingKeys . +type Rule struct { + + // A list of MatchingKeys . The MatchingKeys must have been defined in the + // SchemaMapping . Two records are considered to match according to this rule if + // all of the MatchingKeys match. + // + // This member is required. + MatchingKeys []string + + // A name for the matching rule.
+ // + // This member is required. + RuleName *string + + noSmithyDocumentSerde +} + +// An object which defines the list of matching rules to run and has a field Rules +// , which is a list of rule objects. +type RuleBasedProperties struct { + + // You can either choose ONE_TO_ONE or MANY_TO_MANY as the AttributeMatchingModel. + // When choosing MANY_TO_MANY , the system can match attributes across the sub-types + // of an attribute type. For example, if the value of the Email field of Profile A + // and the value of the BusinessEmail field of Profile B match, the two profiles are + // matched on the Email type. When choosing ONE_TO_ONE , the system can only match + // if the sub-types are exact matches. For example, only when the value of the + // Email field of Profile A and the value of the Email field of Profile B match, + // the two profiles are matched on the Email type. + // + // This member is required. + AttributeMatchingModel AttributeMatchingModel + + // A list of Rule objects, each of which has the fields RuleName and MatchingKeys . + // + // This member is required. + Rules []Rule + + noSmithyDocumentSerde +} + +// An object containing FieldName , Type , GroupName , and MatchKey . +type SchemaInputAttribute struct { + + // A string containing the field name. + // + // This member is required. + FieldName *string + + // The type of the attribute, selected from a list of values. + // + // This member is required. + Type SchemaAttributeType + + // Instructs Entity Resolution to combine several columns into a unified column + // with the identical attribute type. For example, when working with columns such + // as first_name, middle_name, and last_name, assigning them a common GroupName + // will prompt Entity Resolution to concatenate them into a single value. + GroupName *string + + // A key that allows grouping of multiple input attributes into a unified matching + // group. For example, consider a scenario where the source table contains + // various addresses, such as business_address and shipping_address. By assigning + // the MatchKey Address to both attributes, Entity Resolution will match records + // across these fields to create a consolidated matching group. If no MatchKey is + // specified for a column, it won't be utilized for matching purposes but will + // still be included in the output table. + MatchKey *string + + noSmithyDocumentSerde +} + +// An object containing SchemaName , SchemaArn , CreatedAt , and UpdatedAt . +type SchemaMappingSummary struct { + + // The timestamp of when the SchemaMapping was created. + // + // This member is required. + CreatedAt *time.Time + + // The ARN (Amazon Resource Name) that Entity Resolution generated for the + // SchemaMapping . + // + // This member is required. + SchemaArn *string + + // The name of the schema. + // + // This member is required. + SchemaName *string + + // The timestamp of when the SchemaMapping was last updated. + // + // This member is required. + UpdatedAt *time.Time + + noSmithyDocumentSerde +} + +type noSmithyDocumentSerde = smithydocument.NoSerde diff --git a/service/entityresolution/validators.go b/service/entityresolution/validators.go new file mode 100644 index 00000000000..5a8ebc72ce3 --- /dev/null +++ b/service/entityresolution/validators.go @@ -0,0 +1,837 @@ +// Code generated by smithy-go-codegen DO NOT EDIT.
+ +package entityresolution + +import ( + "context" + "fmt" + "github.com/aws/aws-sdk-go-v2/service/entityresolution/types" + smithy "github.com/aws/smithy-go" + "github.com/aws/smithy-go/middleware" +) + +type validateOpCreateMatchingWorkflow struct { +} + +func (*validateOpCreateMatchingWorkflow) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpCreateMatchingWorkflow) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*CreateMatchingWorkflowInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpCreateMatchingWorkflowInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpCreateSchemaMapping struct { +} + +func (*validateOpCreateSchemaMapping) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpCreateSchemaMapping) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*CreateSchemaMappingInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpCreateSchemaMappingInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDeleteMatchingWorkflow struct { +} + +func (*validateOpDeleteMatchingWorkflow) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteMatchingWorkflow) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteMatchingWorkflowInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteMatchingWorkflowInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDeleteSchemaMapping struct { +} + +func (*validateOpDeleteSchemaMapping) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteSchemaMapping) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteSchemaMappingInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteSchemaMappingInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetMatchId struct { +} + +func (*validateOpGetMatchId) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetMatchId) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetMatchIdInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetMatchIdInput(input); err != nil { + return out, metadata, err + } + return 
next.HandleInitialize(ctx, in) +} + +type validateOpGetMatchingJob struct { +} + +func (*validateOpGetMatchingJob) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetMatchingJob) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetMatchingJobInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetMatchingJobInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetMatchingWorkflow struct { +} + +func (*validateOpGetMatchingWorkflow) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetMatchingWorkflow) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetMatchingWorkflowInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetMatchingWorkflowInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetSchemaMapping struct { +} + +func (*validateOpGetSchemaMapping) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetSchemaMapping) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetSchemaMappingInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetSchemaMappingInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpListMatchingJobs struct { +} + +func (*validateOpListMatchingJobs) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpListMatchingJobs) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ListMatchingJobsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpListMatchingJobsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpListTagsForResource struct { +} + +func (*validateOpListTagsForResource) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpListTagsForResource) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ListTagsForResourceInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpListTagsForResourceInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpStartMatchingJob struct { +} + +func (*validateOpStartMatchingJob) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpStartMatchingJob) HandleInitialize(ctx 
context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*StartMatchingJobInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpStartMatchingJobInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpTagResource struct { +} + +func (*validateOpTagResource) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpTagResource) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*TagResourceInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpTagResourceInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpUntagResource struct { +} + +func (*validateOpUntagResource) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpUntagResource) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*UntagResourceInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpUntagResourceInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpUpdateMatchingWorkflow struct { +} + +func (*validateOpUpdateMatchingWorkflow) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpUpdateMatchingWorkflow) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*UpdateMatchingWorkflowInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpUpdateMatchingWorkflowInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +func addOpCreateMatchingWorkflowValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpCreateMatchingWorkflow{}, middleware.After) +} + +func addOpCreateSchemaMappingValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpCreateSchemaMapping{}, middleware.After) +} + +func addOpDeleteMatchingWorkflowValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeleteMatchingWorkflow{}, middleware.After) +} + +func addOpDeleteSchemaMappingValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeleteSchemaMapping{}, middleware.After) +} + +func addOpGetMatchIdValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetMatchId{}, middleware.After) +} + +func addOpGetMatchingJobValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetMatchingJob{}, middleware.After) +} + +func addOpGetMatchingWorkflowValidationMiddleware(stack *middleware.Stack) error { + return 
stack.Initialize.Add(&validateOpGetMatchingWorkflow{}, middleware.After) +} + +func addOpGetSchemaMappingValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetSchemaMapping{}, middleware.After) +} + +func addOpListMatchingJobsValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpListMatchingJobs{}, middleware.After) +} + +func addOpListTagsForResourceValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpListTagsForResource{}, middleware.After) +} + +func addOpStartMatchingJobValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpStartMatchingJob{}, middleware.After) +} + +func addOpTagResourceValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpTagResource{}, middleware.After) +} + +func addOpUntagResourceValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpUntagResource{}, middleware.After) +} + +func addOpUpdateMatchingWorkflowValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpUpdateMatchingWorkflow{}, middleware.After) +} + +func validateInputSource(v *types.InputSource) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "InputSource"} + if v.InputSourceARN == nil { + invalidParams.Add(smithy.NewErrParamRequired("InputSourceARN")) + } + if v.SchemaName == nil { + invalidParams.Add(smithy.NewErrParamRequired("SchemaName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateInputSourceConfig(v []types.InputSource) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "InputSourceConfig"} + for i := range v { + if err := validateInputSource(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOutputAttribute(v *types.OutputAttribute) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "OutputAttribute"} + if v.Name == nil { + invalidParams.Add(smithy.NewErrParamRequired("Name")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOutputAttributes(v []types.OutputAttribute) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "OutputAttributes"} + for i := range v { + if err := validateOutputAttribute(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOutputSource(v *types.OutputSource) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "OutputSource"} + if v.OutputS3Path == nil { + invalidParams.Add(smithy.NewErrParamRequired("OutputS3Path")) + } + if v.Output == nil { + invalidParams.Add(smithy.NewErrParamRequired("Output")) + } else if v.Output != nil { + if err := validateOutputAttributes(v.Output); err != nil { + invalidParams.AddNested("Output", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOutputSourceConfig(v []types.OutputSource) error { + if v == nil { + return nil + 
} + invalidParams := smithy.InvalidParamsError{Context: "OutputSourceConfig"} + for i := range v { + if err := validateOutputSource(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateResolutionTechniques(v *types.ResolutionTechniques) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ResolutionTechniques"} + if v.RuleBasedProperties != nil { + if err := validateRuleBasedProperties(v.RuleBasedProperties); err != nil { + invalidParams.AddNested("RuleBasedProperties", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateRule(v *types.Rule) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "Rule"} + if v.RuleName == nil { + invalidParams.Add(smithy.NewErrParamRequired("RuleName")) + } + if v.MatchingKeys == nil { + invalidParams.Add(smithy.NewErrParamRequired("MatchingKeys")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateRuleBasedProperties(v *types.RuleBasedProperties) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "RuleBasedProperties"} + if v.Rules == nil { + invalidParams.Add(smithy.NewErrParamRequired("Rules")) + } else if v.Rules != nil { + if err := validateRuleList(v.Rules); err != nil { + invalidParams.AddNested("Rules", err.(smithy.InvalidParamsError)) + } + } + if len(v.AttributeMatchingModel) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("AttributeMatchingModel")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateRuleList(v []types.Rule) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "RuleList"} + for i := range v { + if err := validateRule(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateSchemaInputAttribute(v *types.SchemaInputAttribute) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "SchemaInputAttribute"} + if v.FieldName == nil { + invalidParams.Add(smithy.NewErrParamRequired("FieldName")) + } + if len(v.Type) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("Type")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateSchemaInputAttributes(v []types.SchemaInputAttribute) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "SchemaInputAttributes"} + for i := range v { + if err := validateSchemaInputAttribute(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpCreateMatchingWorkflowInput(v *CreateMatchingWorkflowInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CreateMatchingWorkflowInput"} + if v.WorkflowName == nil { + invalidParams.Add(smithy.NewErrParamRequired("WorkflowName")) + } + if v.InputSourceConfig == nil { + invalidParams.Add(smithy.NewErrParamRequired("InputSourceConfig")) + } else if 
v.InputSourceConfig != nil { + if err := validateInputSourceConfig(v.InputSourceConfig); err != nil { + invalidParams.AddNested("InputSourceConfig", err.(smithy.InvalidParamsError)) + } + } + if v.OutputSourceConfig == nil { + invalidParams.Add(smithy.NewErrParamRequired("OutputSourceConfig")) + } else if v.OutputSourceConfig != nil { + if err := validateOutputSourceConfig(v.OutputSourceConfig); err != nil { + invalidParams.AddNested("OutputSourceConfig", err.(smithy.InvalidParamsError)) + } + } + if v.ResolutionTechniques == nil { + invalidParams.Add(smithy.NewErrParamRequired("ResolutionTechniques")) + } else if v.ResolutionTechniques != nil { + if err := validateResolutionTechniques(v.ResolutionTechniques); err != nil { + invalidParams.AddNested("ResolutionTechniques", err.(smithy.InvalidParamsError)) + } + } + if v.RoleArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("RoleArn")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpCreateSchemaMappingInput(v *CreateSchemaMappingInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CreateSchemaMappingInput"} + if v.SchemaName == nil { + invalidParams.Add(smithy.NewErrParamRequired("SchemaName")) + } + if v.MappedInputFields != nil { + if err := validateSchemaInputAttributes(v.MappedInputFields); err != nil { + invalidParams.AddNested("MappedInputFields", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeleteMatchingWorkflowInput(v *DeleteMatchingWorkflowInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteMatchingWorkflowInput"} + if v.WorkflowName == nil { + invalidParams.Add(smithy.NewErrParamRequired("WorkflowName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeleteSchemaMappingInput(v *DeleteSchemaMappingInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteSchemaMappingInput"} + if v.SchemaName == nil { + invalidParams.Add(smithy.NewErrParamRequired("SchemaName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetMatchIdInput(v *GetMatchIdInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetMatchIdInput"} + if v.WorkflowName == nil { + invalidParams.Add(smithy.NewErrParamRequired("WorkflowName")) + } + if v.Record == nil { + invalidParams.Add(smithy.NewErrParamRequired("Record")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetMatchingJobInput(v *GetMatchingJobInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetMatchingJobInput"} + if v.WorkflowName == nil { + invalidParams.Add(smithy.NewErrParamRequired("WorkflowName")) + } + if v.JobId == nil { + invalidParams.Add(smithy.NewErrParamRequired("JobId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetMatchingWorkflowInput(v *GetMatchingWorkflowInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetMatchingWorkflowInput"} + if v.WorkflowName == nil { + invalidParams.Add(smithy.NewErrParamRequired("WorkflowName")) + } + if invalidParams.Len() > 0 { + return invalidParams + 
} else { + return nil + } +} + +func validateOpGetSchemaMappingInput(v *GetSchemaMappingInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetSchemaMappingInput"} + if v.SchemaName == nil { + invalidParams.Add(smithy.NewErrParamRequired("SchemaName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpListMatchingJobsInput(v *ListMatchingJobsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ListMatchingJobsInput"} + if v.WorkflowName == nil { + invalidParams.Add(smithy.NewErrParamRequired("WorkflowName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpListTagsForResourceInput(v *ListTagsForResourceInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ListTagsForResourceInput"} + if v.ResourceArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("ResourceArn")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpStartMatchingJobInput(v *StartMatchingJobInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "StartMatchingJobInput"} + if v.WorkflowName == nil { + invalidParams.Add(smithy.NewErrParamRequired("WorkflowName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpTagResourceInput(v *TagResourceInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "TagResourceInput"} + if v.ResourceArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("ResourceArn")) + } + if v.Tags == nil { + invalidParams.Add(smithy.NewErrParamRequired("Tags")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpUntagResourceInput(v *UntagResourceInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "UntagResourceInput"} + if v.ResourceArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("ResourceArn")) + } + if v.TagKeys == nil { + invalidParams.Add(smithy.NewErrParamRequired("TagKeys")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpUpdateMatchingWorkflowInput(v *UpdateMatchingWorkflowInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "UpdateMatchingWorkflowInput"} + if v.WorkflowName == nil { + invalidParams.Add(smithy.NewErrParamRequired("WorkflowName")) + } + if v.InputSourceConfig == nil { + invalidParams.Add(smithy.NewErrParamRequired("InputSourceConfig")) + } else if v.InputSourceConfig != nil { + if err := validateInputSourceConfig(v.InputSourceConfig); err != nil { + invalidParams.AddNested("InputSourceConfig", err.(smithy.InvalidParamsError)) + } + } + if v.OutputSourceConfig == nil { + invalidParams.Add(smithy.NewErrParamRequired("OutputSourceConfig")) + } else if v.OutputSourceConfig != nil { + if err := validateOutputSourceConfig(v.OutputSourceConfig); err != nil { + invalidParams.AddNested("OutputSourceConfig", err.(smithy.InvalidParamsError)) + } + } + if v.ResolutionTechniques == nil { + invalidParams.Add(smithy.NewErrParamRequired("ResolutionTechniques")) + } else if v.ResolutionTechniques != nil { + if err := validateResolutionTechniques(v.ResolutionTechniques); err != nil { + 
invalidParams.AddNested("ResolutionTechniques", err.(smithy.InvalidParamsError)) + } + } + if v.RoleArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("RoleArn")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} diff --git a/service/glue/deserializers.go b/service/glue/deserializers.go index 9487dfe8668..ef656188d06 100644 --- a/service/glue/deserializers.go +++ b/service/glue/deserializers.go @@ -28852,6 +28852,16 @@ func awsAwsjson11_deserializeDocumentCodeGenConfigurationNode(v **types.CodeGenC return err } + case "SnowflakeSource": + if err := awsAwsjson11_deserializeDocumentSnowflakeSource(&sv.SnowflakeSource, value); err != nil { + return err + } + + case "SnowflakeTarget": + if err := awsAwsjson11_deserializeDocumentSnowflakeTarget(&sv.SnowflakeTarget, value); err != nil { + return err + } + case "SparkConnectorSource": if err := awsAwsjson11_deserializeDocumentSparkConnectorSource(&sv.SparkConnectorSource, value); err != nil { return err @@ -47728,6 +47738,306 @@ func awsAwsjson11_deserializeDocumentSkewedInfo(v **types.SkewedInfo, value inte return nil } +func awsAwsjson11_deserializeDocumentSnowflakeNodeData(v **types.SnowflakeNodeData, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.SnowflakeNodeData + if *v == nil { + sv = &types.SnowflakeNodeData{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Action": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected GenericString to be of type string, got %T instead", value) + } + sv.Action = ptr.String(jtv) + } + + case "AdditionalOptions": + if err := awsAwsjson11_deserializeDocumentAdditionalOptions(&sv.AdditionalOptions, value); err != nil { + return err + } + + case "AutoPushdown": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected BooleanValue to be of type *bool, got %T instead", value) + } + sv.AutoPushdown = jtv + } + + case "Connection": + if err := awsAwsjson11_deserializeDocumentOption(&sv.Connection, value); err != nil { + return err + } + + case "Database": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected GenericString to be of type string, got %T instead", value) + } + sv.Database = ptr.String(jtv) + } + + case "IamRole": + if err := awsAwsjson11_deserializeDocumentOption(&sv.IamRole, value); err != nil { + return err + } + + case "MergeAction": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected GenericLimitedString to be of type string, got %T instead", value) + } + sv.MergeAction = ptr.String(jtv) + } + + case "MergeClause": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected GenericString to be of type string, got %T instead", value) + } + sv.MergeClause = ptr.String(jtv) + } + + case "MergeWhenMatched": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected GenericLimitedString to be of type string, got %T instead", value) + } + sv.MergeWhenMatched = ptr.String(jtv) + } + + case "MergeWhenNotMatched": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected GenericLimitedString to be of type string, got %T instead", value) + } + sv.MergeWhenNotMatched = 
ptr.String(jtv) + } + + case "PostAction": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected GenericString to be of type string, got %T instead", value) + } + sv.PostAction = ptr.String(jtv) + } + + case "PreAction": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected GenericString to be of type string, got %T instead", value) + } + sv.PreAction = ptr.String(jtv) + } + + case "SampleQuery": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected GenericString to be of type string, got %T instead", value) + } + sv.SampleQuery = ptr.String(jtv) + } + + case "Schema": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected GenericString to be of type string, got %T instead", value) + } + sv.Schema = ptr.String(jtv) + } + + case "SelectedColumns": + if err := awsAwsjson11_deserializeDocumentOptionList(&sv.SelectedColumns, value); err != nil { + return err + } + + case "SourceType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected GenericLimitedString to be of type string, got %T instead", value) + } + sv.SourceType = ptr.String(jtv) + } + + case "StagingTable": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected GenericString to be of type string, got %T instead", value) + } + sv.StagingTable = ptr.String(jtv) + } + + case "Table": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected GenericString to be of type string, got %T instead", value) + } + sv.Table = ptr.String(jtv) + } + + case "TableSchema": + if err := awsAwsjson11_deserializeDocumentOptionList(&sv.TableSchema, value); err != nil { + return err + } + + case "TempDir": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected EnclosedInStringProperty to be of type string, got %T instead", value) + } + sv.TempDir = ptr.String(jtv) + } + + case "Upsert": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected BooleanValue to be of type *bool, got %T instead", value) + } + sv.Upsert = jtv + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentSnowflakeSource(v **types.SnowflakeSource, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.SnowflakeSource + if *v == nil { + sv = &types.SnowflakeSource{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Data": + if err := awsAwsjson11_deserializeDocumentSnowflakeNodeData(&sv.Data, value); err != nil { + return err + } + + case "Name": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected NodeName to be of type string, got %T instead", value) + } + sv.Name = ptr.String(jtv) + } + + case "OutputSchemas": + if err := awsAwsjson11_deserializeDocumentGlueSchemas(&sv.OutputSchemas, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentSnowflakeTarget(v **types.SnowflakeTarget, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := 
value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.SnowflakeTarget + if *v == nil { + sv = &types.SnowflakeTarget{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Data": + if err := awsAwsjson11_deserializeDocumentSnowflakeNodeData(&sv.Data, value); err != nil { + return err + } + + case "Inputs": + if err := awsAwsjson11_deserializeDocumentOneInput(&sv.Inputs, value); err != nil { + return err + } + + case "Name": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected NodeName to be of type string, got %T instead", value) + } + sv.Name = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + func awsAwsjson11_deserializeDocumentSourceControlDetails(v **types.SourceControlDetails, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) diff --git a/service/glue/serializers.go b/service/glue/serializers.go index 14cf35a2d66..9755a32bfd6 100644 --- a/service/glue/serializers.go +++ b/service/glue/serializers.go @@ -12553,6 +12553,20 @@ func awsAwsjson11_serializeDocumentCodeGenConfigurationNode(v *types.CodeGenConf } } + if v.SnowflakeSource != nil { + ok := object.Key("SnowflakeSource") + if err := awsAwsjson11_serializeDocumentSnowflakeSource(v.SnowflakeSource, ok); err != nil { + return err + } + } + + if v.SnowflakeTarget != nil { + ok := object.Key("SnowflakeTarget") + if err := awsAwsjson11_serializeDocumentSnowflakeTarget(v.SnowflakeTarget, ok); err != nil { + return err + } + } + if v.SparkConnectorSource != nil { ok := object.Key("SparkConnectorSource") if err := awsAwsjson11_serializeDocumentSparkConnectorSource(v.SparkConnectorSource, ok); err != nil { @@ -18226,6 +18240,180 @@ func awsAwsjson11_serializeDocumentSkewedInfo(v *types.SkewedInfo, value smithyj return nil } +func awsAwsjson11_serializeDocumentSnowflakeNodeData(v *types.SnowflakeNodeData, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Action != nil { + ok := object.Key("Action") + ok.String(*v.Action) + } + + if v.AdditionalOptions != nil { + ok := object.Key("AdditionalOptions") + if err := awsAwsjson11_serializeDocumentAdditionalOptions(v.AdditionalOptions, ok); err != nil { + return err + } + } + + if v.AutoPushdown { + ok := object.Key("AutoPushdown") + ok.Boolean(v.AutoPushdown) + } + + if v.Connection != nil { + ok := object.Key("Connection") + if err := awsAwsjson11_serializeDocumentOption(v.Connection, ok); err != nil { + return err + } + } + + if v.Database != nil { + ok := object.Key("Database") + ok.String(*v.Database) + } + + if v.IamRole != nil { + ok := object.Key("IamRole") + if err := awsAwsjson11_serializeDocumentOption(v.IamRole, ok); err != nil { + return err + } + } + + if v.MergeAction != nil { + ok := object.Key("MergeAction") + ok.String(*v.MergeAction) + } + + if v.MergeClause != nil { + ok := object.Key("MergeClause") + ok.String(*v.MergeClause) + } + + if v.MergeWhenMatched != nil { + ok := object.Key("MergeWhenMatched") + ok.String(*v.MergeWhenMatched) + } + + if v.MergeWhenNotMatched != nil { + ok := object.Key("MergeWhenNotMatched") + ok.String(*v.MergeWhenNotMatched) + } + + if v.PostAction != nil { + ok := object.Key("PostAction") + ok.String(*v.PostAction) + } + + if v.PreAction != nil { + ok := object.Key("PreAction") + ok.String(*v.PreAction) + } + + if v.SampleQuery != nil { + ok := object.Key("SampleQuery") + ok.String(*v.SampleQuery) 
+ } + + if v.Schema != nil { + ok := object.Key("Schema") + ok.String(*v.Schema) + } + + if v.SelectedColumns != nil { + ok := object.Key("SelectedColumns") + if err := awsAwsjson11_serializeDocumentOptionList(v.SelectedColumns, ok); err != nil { + return err + } + } + + if v.SourceType != nil { + ok := object.Key("SourceType") + ok.String(*v.SourceType) + } + + if v.StagingTable != nil { + ok := object.Key("StagingTable") + ok.String(*v.StagingTable) + } + + if v.Table != nil { + ok := object.Key("Table") + ok.String(*v.Table) + } + + if v.TableSchema != nil { + ok := object.Key("TableSchema") + if err := awsAwsjson11_serializeDocumentOptionList(v.TableSchema, ok); err != nil { + return err + } + } + + if v.TempDir != nil { + ok := object.Key("TempDir") + ok.String(*v.TempDir) + } + + if v.Upsert { + ok := object.Key("Upsert") + ok.Boolean(v.Upsert) + } + + return nil +} + +func awsAwsjson11_serializeDocumentSnowflakeSource(v *types.SnowflakeSource, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Data != nil { + ok := object.Key("Data") + if err := awsAwsjson11_serializeDocumentSnowflakeNodeData(v.Data, ok); err != nil { + return err + } + } + + if v.Name != nil { + ok := object.Key("Name") + ok.String(*v.Name) + } + + if v.OutputSchemas != nil { + ok := object.Key("OutputSchemas") + if err := awsAwsjson11_serializeDocumentGlueSchemas(v.OutputSchemas, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeDocumentSnowflakeTarget(v *types.SnowflakeTarget, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Data != nil { + ok := object.Key("Data") + if err := awsAwsjson11_serializeDocumentSnowflakeNodeData(v.Data, ok); err != nil { + return err + } + } + + if v.Inputs != nil { + ok := object.Key("Inputs") + if err := awsAwsjson11_serializeDocumentOneInput(v.Inputs, ok); err != nil { + return err + } + } + + if v.Name != nil { + ok := object.Key("Name") + ok.String(*v.Name) + } + + return nil +} + func awsAwsjson11_serializeDocumentSortCriteria(v []types.SortCriterion, value smithyjson.Value) error { array := value.Array() defer array.Close() diff --git a/service/glue/types/types.go b/service/glue/types/types.go index 872ca04f09f..06759d0aae5 100644 --- a/service/glue/types/types.go +++ b/service/glue/types/types.go @@ -209,7 +209,7 @@ type AmazonRedshiftSource struct { // Specifies an Amazon Redshift target. type AmazonRedshiftTarget struct { - // Specifies the data of the Amazon Reshift target node. + // Specifies the data of the Amazon Redshift target node. Data *AmazonRedshiftNodeData // The nodes that are inputs to the data target. @@ -1010,6 +1010,12 @@ type CodeGenConfigurationNode struct { // DynamicFrames . The output is the selected DynamicFrame SelectFromCollection *SelectFromCollection + // Specifies a Snowflake data source. + SnowflakeSource *SnowflakeSource + + // Specifies a target that writes to a Snowflake data source. + SnowflakeTarget *SnowflakeTarget + // Specifies a connector to an Apache Spark data source. SparkConnectorSource *SparkConnectorSource @@ -6730,6 +6736,129 @@ type SkewedInfo struct { noSmithyDocumentSerde } +// Specifies configuration for Snowflake nodes in Glue Studio. +type SnowflakeNodeData struct { + + // Specifies what action to take when writing to a table with preexisting data. + // Valid values: append , merge , truncate , drop . + Action *string + + // Specifies additional options passed to the Snowflake connector. 
If options are
+	// specified elsewhere in this node, this will take precedence.
+	AdditionalOptions map[string]string
+
+	// Specifies whether automatic query pushdown is enabled. If pushdown is enabled,
+	// then when a query is run on Spark, if part of the query can be "pushed down" to
+	// the Snowflake server, it is pushed down. This improves performance of some
+	// queries.
+	AutoPushdown bool
+
+	// Specifies a Glue Data Catalog Connection to a Snowflake endpoint.
+	Connection *Option
+
+	// Specifies a Snowflake database for your node to use.
+	Database *string
+
+	// Not currently used.
+	IamRole *Option
+
+	// Specifies a merge action. Valid values: simple, custom. If simple, merge
+	// behavior is defined by MergeWhenMatched and MergeWhenNotMatched. If custom, it
+	// is defined by MergeClause.
+	MergeAction *string
+
+	// A SQL statement that specifies a custom merge behavior.
+	MergeClause *string
+
+	// Specifies how to resolve records that match preexisting data when merging.
+	// Valid values: update, delete.
+	MergeWhenMatched *string
+
+	// Specifies how to process records that do not match preexisting data when
+	// merging. Valid values: insert, none.
+	MergeWhenNotMatched *string
+
+	// A SQL string run after the Snowflake connector performs its standard actions.
+	PostAction *string
+
+	// A SQL string run before the Snowflake connector performs its standard actions.
+	PreAction *string
+
+	// A SQL string used to retrieve data with the "query" source type.
+	SampleQuery *string
+
+	// Specifies a Snowflake database schema for your node to use.
+	Schema *string
+
+	// Specifies the columns combined to identify a record when detecting matches for
+	// merges and upserts. A list of structures with value, label, and description
+	// keys. Each structure describes a column.
+	SelectedColumns []Option
+
+	// Specifies how retrieved data is specified. Valid values: "table", "query".
+	SourceType *string
+
+	// The name of a staging table used when performing merge or upsert append
+	// actions. Data is written to this table, then moved to the table by a generated
+	// postaction.
+	StagingTable *string
+
+	// Specifies a Snowflake table for your node to use.
+	Table *string
+
+	// Manually defines the target schema for the node. A list of structures with
+	// value, label, and description keys. Each structure defines a column.
+	TableSchema []Option
+
+	// Not currently used.
+	TempDir *string
+
+	// Used when Action is append. Specifies the resolution behavior when a row
+	// already exists. If true, preexisting rows will be updated. If false, those rows
+	// will be inserted.
+	Upsert bool
+
+	noSmithyDocumentSerde
+}
+
+// Specifies a Snowflake data source.
+type SnowflakeSource struct {
+
+	// Configuration for the Snowflake data source.
+	//
+	// This member is required.
+	Data *SnowflakeNodeData
+
+	// The name of the Snowflake data source.
+	//
+	// This member is required.
+	Name *string
+
+	// Specifies user-defined schemas for your output data.
+	OutputSchemas []GlueSchema
+
+	noSmithyDocumentSerde
+}
+
+// Specifies a Snowflake target.
+type SnowflakeTarget struct {
+
+	// Specifies the data of the Snowflake target node.
+	//
+	// This member is required.
+	Data *SnowflakeNodeData
+
+	// The name of the Snowflake target.
+	//
+	// This member is required.
+	Name *string
+
+	// The nodes that are inputs to the data target.
+	Inputs []string
+
+	noSmithyDocumentSerde
+}
+
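For context, a minimal sketch of how these new Snowflake types slot into a Glue job definition. This is an editor's illustration, not part of the change: the node key, connection name, and the database, schema, and table values are placeholder assumptions, and the snippet presumes the aws and glue types packages are imported.

    // Wire a Snowflake table read into the job's node graph. Name and Data are
    // the required members (see validateSnowflakeSource in the validators below).
    nodes := map[string]types.CodeGenConfigurationNode{
        "snowflake_source_1": {
            SnowflakeSource: &types.SnowflakeSource{
                Name: aws.String("read_orders"),
                Data: &types.SnowflakeNodeData{
                    Connection: &types.Option{Value: aws.String("my-snowflake-connection")}, // placeholder connection
                    Database:   aws.String("ANALYTICS"),                                     // placeholder database
                    Schema:     aws.String("PUBLIC"),
                    Table:      aws.String("ORDERS"),
                    SourceType: aws.String("table"), // read a table rather than a SampleQuery
                },
            },
        },
    }
    _ = nodes // would be passed via CreateJobInput.CodeGenConfigurationNodes

 // Specifies a field to sort by and a sort order.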
type SortCriterion struct { diff --git a/service/glue/validators.go b/service/glue/validators.go index 7841f9daf39..446290a8f8a 100644 --- a/service/glue/validators.go +++ b/service/glue/validators.go @@ -4866,6 +4866,16 @@ func validateCodeGenConfigurationNode(v *types.CodeGenConfigurationNode) error { invalidParams.AddNested("Recipe", err.(smithy.InvalidParamsError)) } } + if v.SnowflakeSource != nil { + if err := validateSnowflakeSource(v.SnowflakeSource); err != nil { + invalidParams.AddNested("SnowflakeSource", err.(smithy.InvalidParamsError)) + } + } + if v.SnowflakeTarget != nil { + if err := validateSnowflakeTarget(v.SnowflakeTarget); err != nil { + invalidParams.AddNested("SnowflakeTarget", err.(smithy.InvalidParamsError)) + } + } if invalidParams.Len() > 0 { return invalidParams } else { @@ -7190,6 +7200,47 @@ func validateSelectFromCollection(v *types.SelectFromCollection) error { } } +func validateSnowflakeSource(v *types.SnowflakeSource) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "SnowflakeSource"} + if v.Name == nil { + invalidParams.Add(smithy.NewErrParamRequired("Name")) + } + if v.Data == nil { + invalidParams.Add(smithy.NewErrParamRequired("Data")) + } + if v.OutputSchemas != nil { + if err := validateGlueSchemas(v.OutputSchemas); err != nil { + invalidParams.AddNested("OutputSchemas", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateSnowflakeTarget(v *types.SnowflakeTarget) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "SnowflakeTarget"} + if v.Name == nil { + invalidParams.Add(smithy.NewErrParamRequired("Name")) + } + if v.Data == nil { + invalidParams.Add(smithy.NewErrParamRequired("Data")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateSparkConnectorSource(v *types.SparkConnectorSource) error { if v == nil { return nil diff --git a/service/healthlake/api_op_CreateFHIRDatastore.go b/service/healthlake/api_op_CreateFHIRDatastore.go index 1d4e8ab8e65..3c8de3a39da 100644 --- a/service/healthlake/api_op_CreateFHIRDatastore.go +++ b/service/healthlake/api_op_CreateFHIRDatastore.go @@ -12,7 +12,7 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Creates a Data Store that can ingest and export FHIR formatted data. +// Creates a data store that can ingest and export FHIR formatted data. func (c *Client) CreateFHIRDatastore(ctx context.Context, params *CreateFHIRDatastoreInput, optFns ...func(*Options)) (*CreateFHIRDatastoreOutput, error) { if params == nil { params = &CreateFHIRDatastoreInput{} @@ -30,7 +30,7 @@ func (c *Client) CreateFHIRDatastore(ctx context.Context, params *CreateFHIRData type CreateFHIRDatastoreInput struct { - // The FHIR version of the Data Store. The only supported version is R4. + // The FHIR version of the data store. The only supported version is R4. // // This member is required. DatastoreTypeVersion types.FHIRVersion @@ -38,22 +38,22 @@ type CreateFHIRDatastoreInput struct { // Optional user provided token used for ensuring idempotency. ClientToken *string - // The user generated name for the Data Store. + // The user generated name for the data store. DatastoreName *string - // The configuration of the identity provider that you want to use for your Data - // Store. + // The configuration of the identity provider that you want to use for your data + // store. 
IdentityProviderConfiguration *types.IdentityProviderConfiguration - // Optional parameter to preload data upon creation of the Data Store. Currently, + // Optional parameter to preload data upon creation of the data store. Currently, // the only supported preloaded data is synthetic data generated from Synthea. PreloadDataConfig *types.PreloadDataConfig // The server-side encryption key configuration for a customer provided encryption - // key specified for creating a Data Store. + // key specified for creating a data store. SseConfiguration *types.SseConfiguration - // Resource tags that are applied to a Data Store when it is created. + // Resource tags that are applied to a data store when it is created. Tags []types.Tag noSmithyDocumentSerde @@ -61,25 +61,24 @@ type CreateFHIRDatastoreInput struct { type CreateFHIRDatastoreOutput struct { - // The Data Store ARN is generated during the creation of the Data Store and can - // be found in the output from the initial Data Store creation call. + // The data store ARN is generated during the creation of the data store and can + // be found in the output from the initial data store creation call. // // This member is required. DatastoreArn *string - // The AWS endpoint for the created Data Store. + // The AWS endpoint for the created data store. // // This member is required. DatastoreEndpoint *string - // The AWS-generated Data Store id. This id is in the output from the initial Data - // Store creation call. + // The AWS-generated data store id. This id is in the output from the initial data + // store creation call. // // This member is required. DatastoreId *string - // The status of the FHIR Data Store. Possible statuses are ‘CREATING’, ‘ACTIVE’, - // ‘DELETING’, ‘DELETED’. + // The status of the FHIR data store. // // This member is required. DatastoreStatus types.DatastoreStatus diff --git a/service/healthlake/api_op_DeleteFHIRDatastore.go b/service/healthlake/api_op_DeleteFHIRDatastore.go index 07652c5d914..47aa46a481c 100644 --- a/service/healthlake/api_op_DeleteFHIRDatastore.go +++ b/service/healthlake/api_op_DeleteFHIRDatastore.go @@ -11,7 +11,7 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Deletes a Data Store. +// Deletes a data store. func (c *Client) DeleteFHIRDatastore(ctx context.Context, params *DeleteFHIRDatastoreInput, optFns ...func(*Options)) (*DeleteFHIRDatastoreOutput, error) { if params == nil { params = &DeleteFHIRDatastoreInput{} @@ -29,7 +29,7 @@ func (c *Client) DeleteFHIRDatastore(ctx context.Context, params *DeleteFHIRData type DeleteFHIRDatastoreInput struct { - // The AWS-generated ID for the Data Store to be deleted. + // The AWS-generated ID for the data store to be deleted. // // This member is required. DatastoreId *string @@ -39,22 +39,22 @@ type DeleteFHIRDatastoreInput struct { type DeleteFHIRDatastoreOutput struct { - // The Amazon Resource Name (ARN) that gives Amazon HealthLake access permission. + // The Amazon Resource Name (ARN) that gives AWS HealthLake access permission. // // This member is required. DatastoreArn *string - // The AWS endpoint for the Data Store the user has requested to be deleted. + // The AWS endpoint for the data store the user has requested to be deleted. // // This member is required. DatastoreEndpoint *string - // The AWS-generated ID for the Data Store to be deleted. + // The AWS-generated ID for the data store to be deleted. // // This member is required. 
	DatastoreId *string

-	// The status of the Data Store that the user has requested to be deleted.
+	// The status of the data store that the user has requested to be deleted.
 	//
 	// This member is required.
 	DatastoreStatus types.DatastoreStatus
diff --git a/service/healthlake/api_op_DescribeFHIRDatastore.go b/service/healthlake/api_op_DescribeFHIRDatastore.go
index 7e27379b7d4..6ffc121a3f1 100644
--- a/service/healthlake/api_op_DescribeFHIRDatastore.go
+++ b/service/healthlake/api_op_DescribeFHIRDatastore.go
@@ -11,9 +11,9 @@ import (
 	smithyhttp "github.com/aws/smithy-go/transport/http"
 )

-// Gets the properties associated with the FHIR Data Store, including the Data
-// Store ID, Data Store ARN, Data Store name, Data Store status, created at, Data
-// Store type version, and Data Store endpoint.
+// Gets the properties associated with the FHIR data store, including the data
+// store ID, data store ARN, data store name, data store status, when the data
+// store was created, data store type version, and the data store's endpoint.
 func (c *Client) DescribeFHIRDatastore(ctx context.Context, params *DescribeFHIRDatastoreInput, optFns ...func(*Options)) (*DescribeFHIRDatastoreOutput, error) {
 	if params == nil {
 		params = &DescribeFHIRDatastoreInput{}
@@ -31,7 +31,7 @@ func (c *Client) DescribeFHIRDatastore(ctx context.Context, params *DescribeFHIR

 type DescribeFHIRDatastoreInput struct {

-	// The AWS-generated Data Store ID.
+	// The AWS-generated data store ID.
 	//
 	// This member is required.
 	DatastoreId *string
@@ -41,9 +41,9 @@ type DescribeFHIRDatastoreInput struct {

 type DescribeFHIRDatastoreOutput struct {

-	// All properties associated with a Data Store, including the Data Store ID, Data
-	// Store ARN, Data Store name, Data Store status, created at, Data Store type
-	// version, and Data Store endpoint.
+	// All properties associated with a data store, including the data store ID, data
+	// store ARN, data store name, data store status, when the data store was created,
+	// data store type version, and the data store's endpoint.
 	//
 	// This member is required.
 	DatastoreProperties *types.DatastoreProperties
diff --git a/service/healthlake/api_op_DescribeFHIRExportJob.go b/service/healthlake/api_op_DescribeFHIRExportJob.go
index f1b927ddfba..72d0f2be0dc 100644
--- a/service/healthlake/api_op_DescribeFHIRExportJob.go
+++ b/service/healthlake/api_op_DescribeFHIRExportJob.go
@@ -30,7 +30,7 @@ func (c *Client) DescribeFHIRExportJob(ctx context.Context, params *DescribeFHIR

 type DescribeFHIRExportJobInput struct {

-	// The AWS generated ID for the Data Store from which files are being exported
-	// from for an export job.
+	// The AWS generated ID for the data store from which files are being exported
+	// for an export job.
 	//
 	// This member is required.
diff --git a/service/healthlake/api_op_DescribeFHIRImportJob.go b/service/healthlake/api_op_DescribeFHIRImportJob.go
index b077d42f5ef..6e9a8103cab 100644
--- a/service/healthlake/api_op_DescribeFHIRImportJob.go
+++ b/service/healthlake/api_op_DescribeFHIRImportJob.go
@@ -30,7 +30,7 @@ func (c *Client) DescribeFHIRImportJob(ctx context.Context, params *DescribeFHIR

 type DescribeFHIRImportJobInput struct {

-	// The AWS-generated ID of the Data Store.
+	// The AWS-generated ID of the data store.
 	//
 	// This member is required.
DatastoreId *string diff --git a/service/healthlake/api_op_ListFHIRDatastores.go b/service/healthlake/api_op_ListFHIRDatastores.go index 85f24d83efd..50e14abbbe8 100644 --- a/service/healthlake/api_op_ListFHIRDatastores.go +++ b/service/healthlake/api_op_ListFHIRDatastores.go @@ -12,8 +12,8 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Lists all FHIR Data Stores that are in the user’s account, regardless of Data -// Store status. +// Lists all FHIR data stores that are in the user’s account, regardless of data +// store status. func (c *Client) ListFHIRDatastores(ctx context.Context, params *ListFHIRDatastoresInput, optFns ...func(*Options)) (*ListFHIRDatastoresOutput, error) { if params == nil { params = &ListFHIRDatastoresInput{} @@ -31,14 +31,14 @@ func (c *Client) ListFHIRDatastores(ctx context.Context, params *ListFHIRDatasto type ListFHIRDatastoresInput struct { - // Lists all filters associated with a FHIR Data Store request. + // Lists all filters associated with a FHIR data store request. Filter *types.DatastoreFilter - // The maximum number of Data Stores returned in a single page of a + // The maximum number of data stores returned in a single page of a // ListFHIRDatastoresRequest call. MaxResults *int32 - // Fetches the next page of Data Stores when results are paginated. + // Fetches the next page of data stores when results are paginated. NextToken *string noSmithyDocumentSerde @@ -46,7 +46,7 @@ type ListFHIRDatastoresInput struct { type ListFHIRDatastoresOutput struct { - // All properties associated with the listed Data Stores. + // All properties associated with the listed data stores. // // This member is required. DatastorePropertiesList []types.DatastoreProperties @@ -134,7 +134,7 @@ var _ ListFHIRDatastoresAPIClient = (*Client)(nil) // ListFHIRDatastoresPaginatorOptions is the paginator options for // ListFHIRDatastores type ListFHIRDatastoresPaginatorOptions struct { - // The maximum number of Data Stores returned in a single page of a + // The maximum number of data stores returned in a single page of a // ListFHIRDatastoresRequest call. Limit int32 diff --git a/service/healthlake/api_op_ListFHIRExportJobs.go b/service/healthlake/api_op_ListFHIRExportJobs.go index d7442b24d8e..288ccca4cb1 100644 --- a/service/healthlake/api_op_ListFHIRExportJobs.go +++ b/service/healthlake/api_op_ListFHIRExportJobs.go @@ -31,8 +31,8 @@ func (c *Client) ListFHIRExportJobs(ctx context.Context, params *ListFHIRExportJ type ListFHIRExportJobsInput struct { - // This parameter limits the response to the export job with the specified Data - // Store ID. + // This parameter limits the response to the export job with the specified data + // store ID. // // This member is required. DatastoreId *string diff --git a/service/healthlake/api_op_ListFHIRImportJobs.go b/service/healthlake/api_op_ListFHIRImportJobs.go index 448fd1b68e8..f758cefd255 100644 --- a/service/healthlake/api_op_ListFHIRImportJobs.go +++ b/service/healthlake/api_op_ListFHIRImportJobs.go @@ -31,8 +31,8 @@ func (c *Client) ListFHIRImportJobs(ctx context.Context, params *ListFHIRImportJ type ListFHIRImportJobsInput struct { - // This parameter limits the response to the import job with the specified Data - // Store ID. + // This parameter limits the response to the import job with the specified data + // store ID. // // This member is required. 
	DatastoreId *string
diff --git a/service/healthlake/api_op_ListTagsForResource.go b/service/healthlake/api_op_ListTagsForResource.go
index 9e569fcdefd..de9635c33b4 100644
--- a/service/healthlake/api_op_ListTagsForResource.go
+++ b/service/healthlake/api_op_ListTagsForResource.go
@@ -11,7 +11,7 @@ import (
 	smithyhttp "github.com/aws/smithy-go/transport/http"
 )

-// Returns a list of all existing tags associated with a Data Store.
+// Returns a list of all existing tags associated with a data store.
 func (c *Client) ListTagsForResource(ctx context.Context, params *ListTagsForResourceInput, optFns ...func(*Options)) (*ListTagsForResourceOutput, error) {
 	if params == nil {
 		params = &ListTagsForResourceInput{}
@@ -29,7 +29,7 @@ func (c *Client) ListTagsForResource(ctx context.Context, params *ListTagsForRes

 type ListTagsForResourceInput struct {

-	// The Amazon Resource Name(ARN) of the Data Store for which tags are being added.
+	// The Amazon Resource Name (ARN) of the data store for which tags are being listed.
 	//
 	// This member is required.
 	ResourceARN *string
@@ -39,7 +39,7 @@ type ListTagsForResourceInput struct {

 type ListTagsForResourceOutput struct {

-	// Returns a list of tags associated with a Data Store.
+	// Returns a list of tags associated with a data store.
 	Tags []types.Tag

 	// Metadata pertaining to the operation's result.
diff --git a/service/healthlake/api_op_StartFHIRExportJob.go b/service/healthlake/api_op_StartFHIRExportJob.go
index 1a136c871b0..35f09b751a7 100644
--- a/service/healthlake/api_op_StartFHIRExportJob.go
+++ b/service/healthlake/api_op_StartFHIRExportJob.go
@@ -40,7 +40,7 @@ type StartFHIRExportJobInput struct {
 	// This member is required.
 	DataAccessRoleArn *string

-	// The AWS generated ID for the Data Store from which files are being exported for
+	// The AWS generated ID for the data store from which files are being exported for
 	// an export job.
 	//
 	// This member is required.
@@ -70,7 +70,7 @@ type StartFHIRExportJobOutput struct {
 	// This member is required.
 	JobStatus types.JobStatus

-	// The AWS generated ID for the Data Store from which files are being exported for
+	// The AWS generated ID for the data store from which files are being exported for
 	// an export job.
 	DatastoreId *string
diff --git a/service/healthlake/api_op_StartFHIRImportJob.go b/service/healthlake/api_op_StartFHIRImportJob.go
index 52a04fed0cd..4451deef497 100644
--- a/service/healthlake/api_op_StartFHIRImportJob.go
+++ b/service/healthlake/api_op_StartFHIRImportJob.go
@@ -35,12 +35,12 @@ type StartFHIRImportJobInput struct {
 	// This member is required.
 	ClientToken *string

-	// The Amazon Resource Name (ARN) that gives Amazon HealthLake access permission.
+	// The Amazon Resource Name (ARN) that gives AWS HealthLake access permission.
 	//
 	// This member is required.
 	DataAccessRoleArn *string

-	// The AWS-generated Data Store ID.
+	// The AWS-generated data store ID.
 	//
 	// This member is required.
 	DatastoreId *string
@@ -73,7 +73,7 @@ type StartFHIRImportJobOutput struct {
 	// This member is required.
 	JobStatus types.JobStatus

-	// The AWS-generated Data Store ID.
+	// The AWS-generated data store ID.
 	DatastoreId *string

 	// Metadata pertaining to the operation's result.
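To make the renamed "data store" terminology concrete, here is a brief usage sketch of the import path touched above. This is an editor's illustration, not part of the change: the ARNs, bucket names, token, and data store ID are placeholders, and error handling is abbreviated.

    package main

    import (
        "context"
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go-v2/aws"
        "github.com/aws/aws-sdk-go-v2/config"
        "github.com/aws/aws-sdk-go-v2/service/healthlake"
        "github.com/aws/aws-sdk-go-v2/service/healthlake/types"
    )

    func main() {
        cfg, err := config.LoadDefaultConfig(context.TODO())
        if err != nil {
            log.Fatal(err)
        }
        client := healthlake.NewFromConfig(cfg)

        // Start an FHIR import job; every value below is a placeholder.
        out, err := client.StartFHIRImportJob(context.TODO(), &healthlake.StartFHIRImportJobInput{
            ClientToken:       aws.String("3c3ec4b8-example-idempotency-token"),
            DatastoreId:       aws.String("exampledatastoreid"),
            DataAccessRoleArn: aws.String("arn:aws:iam::111122223333:role/HealthLakeImportRole"),
            InputDataConfig: &types.InputDataConfigMemberS3Uri{
                Value: "s3://example-bucket/fhir-input/",
            },
            JobOutputDataConfig: &types.OutputDataConfigMemberS3Configuration{
                Value: types.S3Configuration{
                    S3Uri:    aws.String("s3://example-bucket/fhir-output/"),
                    KmsKeyId: aws.String("arn:aws:kms:us-east-1:111122223333:key/example-key-id"),
                },
            },
        })
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("import job:", aws.ToString(out.JobId), out.JobStatus)
    }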
diff --git a/service/healthlake/api_op_TagResource.go b/service/healthlake/api_op_TagResource.go
index a3235cf9a04..f55ce4205f3 100644
--- a/service/healthlake/api_op_TagResource.go
+++ b/service/healthlake/api_op_TagResource.go
@@ -11,7 +11,7 @@ import (
 	smithyhttp "github.com/aws/smithy-go/transport/http"
 )

-// Adds a user specified key and value tag to a Data Store.
+// Adds a user-specified key and value tag to a data store.
 func (c *Client) TagResource(ctx context.Context, params *TagResourceInput, optFns ...func(*Options)) (*TagResourceOutput, error) {
 	if params == nil {
 		params = &TagResourceInput{}
@@ -29,13 +29,13 @@ type TagResourceInput struct {

-	// The Amazon Resource Name(ARN)that gives Amazon HealthLake access to the Data
-	// Store which tags are being added to.
+	// The Amazon Resource Name (ARN) that gives AWS HealthLake access to the data
+	// store to which tags are being added.
 	//
 	// This member is required.
 	ResourceARN *string

-	// The user specified key and value pair tags being added to a Data Store.
+	// The user-specified key and value pair tags being added to a data store.
 	//
 	// This member is required.
 	Tags []types.Tag
diff --git a/service/healthlake/api_op_UntagResource.go b/service/healthlake/api_op_UntagResource.go
index 4af1b96a3e8..ff97f537cd3 100644
--- a/service/healthlake/api_op_UntagResource.go
+++ b/service/healthlake/api_op_UntagResource.go
@@ -10,7 +10,7 @@ import (
 	smithyhttp "github.com/aws/smithy-go/transport/http"
 )

-// Removes tags from a Data Store.
+// Removes tags from a data store.
 func (c *Client) UntagResource(ctx context.Context, params *UntagResourceInput, optFns ...func(*Options)) (*UntagResourceOutput, error) {
 	if params == nil {
 		params = &UntagResourceInput{}
@@ -28,13 +28,13 @@ type UntagResourceInput struct {

-	// "The Amazon Resource Name(ARN) of the Data Store for which tags are being
-	// removed
+	// The Amazon Resource Name (ARN) of the data store for which tags are being
+	// removed.
 	//
 	// This member is required.
 	ResourceARN *string

-	// The keys for the tags to be removed from the Healthlake Data Store.
+	// The keys for the tags to be removed from the HealthLake data store.
 	//
 	// This member is required.
 	TagKeys []string
diff --git a/service/healthlake/doc.go b/service/healthlake/doc.go
index bfcf18f3685..a20cb9d06eb 100644
--- a/service/healthlake/doc.go
+++ b/service/healthlake/doc.go
@@ -3,7 +3,7 @@
 // Package healthlake provides the API client, operations, and parameter types for
 // Amazon HealthLake.
 //
-// Amazon HealthLake is a HIPAA eligibile service that allows customers to store,
+// AWS HealthLake is a HIPAA eligible service that allows customers to store,
 // transform, query, and analyze their FHIR-formatted data in a consistent fashion
 // in the cloud.
 package healthlake
diff --git a/service/healthlake/types/errors.go b/service/healthlake/types/errors.go
index a063d408bc4..3b03354ae0a 100644
--- a/service/healthlake/types/errors.go
+++ b/service/healthlake/types/errors.go
@@ -33,7 +33,7 @@ func (e *AccessDeniedException) ErrorCode() string {
 }
 func (e *AccessDeniedException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }

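The modeled exceptions in errors.go are ordinary smithy API errors, so callers can branch on them with errors.As. A brief hypothetical sketch follows; the operation, client, and ID are placeholders, and the errors, log, and healthlake types imports are assumed.

    _, err := client.DescribeFHIRDatastore(ctx, &healthlake.DescribeFHIRDatastoreInput{
        DatastoreId: aws.String("exampledatastoreid"), // placeholder ID
    })
    var notFound *types.ResourceNotFoundException
    if errors.As(err, &notFound) {
        // The requested data store does not exist.
        log.Println("data store not found:", notFound.ErrorMessage())
    }

-// The Data Store is in a transition state and the user requested action can not
+// The data store is in a transition state and the user requested action cannot
 // be performed.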
type ConflictException struct {
	Message *string
@@ -86,7 +86,7 @@ func (e *InternalServerException) ErrorCode() string {
 }
 func (e *InternalServerException) ErrorFault() smithy.ErrorFault { return smithy.FaultServer }

-// The requested Data Store was not found.
+// The requested data store was not found.
 type ResourceNotFoundException struct {
 	Message *string
diff --git a/service/healthlake/types/types.go b/service/healthlake/types/types.go
index 49eb356b935..a4218d308b0 100644
--- a/service/healthlake/types/types.go
+++ b/service/healthlake/types/types.go
@@ -7,48 +7,47 @@ import (
 	"time"
 )

-// The filters applied to Data Store query.
+// The filters applied to the data store query.
 type DatastoreFilter struct {

-	// A filter that allows the user to set cutoff dates for records. All Data Stores
+	// A filter that allows the user to set cutoff dates for records. All data stores
 	// created after the specified date will be included in the results.
 	CreatedAfter *time.Time

-	// A filter that allows the user to set cutoff dates for records. All Data Stores
+	// A filter that allows the user to set cutoff dates for records. All data stores
 	// created before the specified date will be included in the results.
 	CreatedBefore *time.Time

-	// Allows the user to filter Data Store results by name.
+	// Allows the user to filter data store results by name.
 	DatastoreName *string

-	// Allows the user to filter Data Store results by status.
+	// Allows the user to filter data store results by status.
 	DatastoreStatus DatastoreStatus

 	noSmithyDocumentSerde
 }

-// Displays the properties of the Data Store, including the ID, ARN, name, and the
-// status of the Data Store.
+// Displays the properties of the data store, including the ID, ARN, name, and the
+// status of the data store.
 type DatastoreProperties struct {

-	// The Amazon Resource Name used in the creation of the Data Store.
+	// The Amazon Resource Name used in the creation of the data store.
 	//
 	// This member is required.
 	DatastoreArn *string

-	// The AWS endpoint for the Data Store. Each Data Store will have it's own
-	// endpoint with Data Store ID in the endpoint URL.
+	// The AWS endpoint for the data store. Each data store will have its own
+	// endpoint with the data store ID in the endpoint URL.
 	//
 	// This member is required.
 	DatastoreEndpoint *string

-	// The AWS-generated ID number for the Data Store.
+	// The AWS-generated ID number for the data store.
 	//
 	// This member is required.
 	DatastoreId *string

-	// The status of the Data Store. Possible statuses are 'CREATING', 'ACTIVE',
-	// 'DELETING', or 'DELETED'.
+	// The status of the data store.
 	//
 	// This member is required.
 	DatastoreStatus DatastoreStatus
@@ -58,16 +57,16 @@ type DatastoreProperties struct {
 	// This member is required.
 	DatastoreTypeVersion FHIRVersion

-	// The time that a Data Store was created.
+	// The time that a data store was created.
 	CreatedAt *time.Time

-	// The user-generated name for the Data Store.
+	// The user-generated name for the data store.
 	DatastoreName *string

-	// The identity provider that you selected when you created the Data Store.
+	// The identity provider that you selected when you created the data store.
 	IdentityProviderConfiguration *IdentityProviderConfiguration

-	// The preloaded data configuration for the Data Store. Only data preloaded from
+	// The preloaded data configuration for the data store. Only data preloaded from
 	// Synthea is supported.
	PreloadDataConfig *PreloadDataConfig
@@ -82,7 +81,7 @@ type DatastoreProperties struct {
 // status of the job.
 type ExportJobProperties struct {

-	// The AWS generated ID for the Data Store from which files are being exported for
+	// The AWS generated ID for the data store from which files are being exported for
 	// an export job.
 	//
 	// This member is required.
@@ -124,16 +123,16 @@ type ExportJobProperties struct {
 	noSmithyDocumentSerde
 }

-// The identity provider configuration that you gave when the Data Store was
+// The identity provider configuration that you gave when the data store was
 // created.
 type IdentityProviderConfiguration struct {

-	// The authorization strategy that you selected when you created the Data Store.
+	// The authorization strategy that you selected when you created the data store.
 	//
 	// This member is required.
 	AuthorizationStrategy AuthorizationStrategy

-	// If you enabled fine-grained authorization when you created the Data Store.
+	// If you enabled fine-grained authorization when you created the data store.
 	FineGrainedAuthorizationEnabled bool

 	// The Amazon Resource Name (ARN) of the Lambda function that you want to use to
@@ -159,7 +158,7 @@ type IdentityProviderConfiguration struct {
 }

 // Displays the properties of the import job, including the ID, Arn, Name, and the
-// status of the Data Store.
+// status of the data store.
 type ImportJobProperties struct {

 	// The datastore id used when the Import job was created.
@@ -188,8 +187,8 @@ type ImportJobProperties struct {
 	// This member is required.
 	SubmitTime *time.Time

-	// The Amazon Resource Name (ARN) that gives Amazon HealthLake access to your
-	// input data.
+	// The Amazon Resource Name (ARN) that gives AWS HealthLake access to your input
+	// data.
 	DataAccessRoleArn *string

 	// The time that the Import job was completed.
@@ -217,7 +216,7 @@ type InputDataConfig interface {
 }

 // The S3Uri is the user specified S3 location of the FHIR data to be imported
-// into Amazon HealthLake.
+// into AWS HealthLake.
 type InputDataConfigMemberS3Uri struct {
 	Value string
@@ -226,17 +225,17 @@ func (*InputDataConfigMemberS3Uri) isInputDataConfig() {}

-// The customer-managed-key(CMK) used when creating a Data Store. If a customer
+// The customer-managed key (CMK) used when creating a data store. If a customer
 // owned key is not specified, an AWS owned key will be used for encryption.
 type KmsEncryptionConfig struct {

-	// The type of customer-managed-key(CMK) used for encyrption. The two types of
+	// The type of customer-managed key (CMK) used for encryption. The two types of
 	// supported CMKs are customer owned CMKs and AWS owned CMKs.
 	//
 	// This member is required.
 	CmkType CmkType

-	// The KMS encryption key id/alias used to encrypt the Data Store contents at rest.
+	// The KMS encryption key id/alias used to encrypt the data store contents at rest.
 	KmsKeyId *string

 	noSmithyDocumentSerde
@@ -260,7 +259,7 @@ type OutputDataConfigMemberS3Configuration struct {

 func (*OutputDataConfigMemberS3Configuration) isOutputDataConfig() {}

-// The input properties for the preloaded Data Store. Only data preloaded from
+// The input properties for the preloaded data store. Only data preloaded from
 // Synthea is supported.
 type PreloadDataConfig struct {

@@ -282,7 +281,7 @@ type S3Configuration struct {
 	KmsKeyId *string

 	// The S3Uri is the user specified S3 location of the FHIR data to be imported
-	// into Amazon HealthLake.
+	// into AWS HealthLake.
 	//
 	// This member is required.
S3Uri *string diff --git a/service/managedblockchainquery/LICENSE.txt b/service/managedblockchainquery/LICENSE.txt new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/service/managedblockchainquery/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/service/managedblockchainquery/api_client.go b/service/managedblockchainquery/api_client.go new file mode 100644 index 00000000000..5aea7da25f1 --- /dev/null +++ b/service/managedblockchainquery/api_client.go @@ -0,0 +1,446 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package managedblockchainquery + +import ( + "context" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/defaults" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/retry" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" + internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" + smithy "github.com/aws/smithy-go" + smithydocument "github.com/aws/smithy-go/document" + "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "net" + "net/http" + "time" +) + +const ServiceID = "ManagedBlockchain Query" +const ServiceAPIVersion = "2023-05-04" + +// Client provides the API client to make operations call for Amazon Managed +// Blockchain Query. +type Client struct { + options Options +} + +// New returns an initialized Client based on the functional options. Provide +// additional functional options to further configure the behavior of the client, +// such as changing the client's endpoint or adding custom middleware behavior. +func New(options Options, optFns ...func(*Options)) *Client { + options = options.Copy() + + resolveDefaultLogger(&options) + + setResolvedDefaultsMode(&options) + + resolveRetryer(&options) + + resolveHTTPClient(&options) + + resolveHTTPSignerV4(&options) + + resolveDefaultEndpointConfiguration(&options) + + for _, fn := range optFns { + fn(&options) + } + + client := &Client{ + options: options, + } + + return client +} + +type Options struct { + // Set of options to modify how an operation is invoked. These apply to all + // operations invoked for this client. Use functional options on operation call to + // modify this list for per operation behavior. + APIOptions []func(*middleware.Stack) error + + // The optional application specific identifier appended to the User-Agent header. + AppID string + + // Configures the events that will be sent to the configured logger. + ClientLogMode aws.ClientLogMode + + // The credentials object to use when signing requests. + Credentials aws.CredentialsProvider + + // The configuration DefaultsMode that the SDK should use when constructing the + // clients initial default settings. + DefaultsMode aws.DefaultsMode + + // The endpoint options to be used when attempting to resolve an endpoint. + EndpointOptions EndpointResolverOptions + + // The service endpoint resolver. + EndpointResolver EndpointResolver + + // Signature Version 4 (SigV4) Signer + HTTPSignerV4 HTTPSignerV4 + + // The logger writer interface to write logging messages to. + Logger logging.Logger + + // The region to send requests to. 
(Required)
+	Region string
+
+	// RetryMaxAttempts specifies the maximum number of attempts an API client will
+	// make when calling an operation that fails with a retryable error. A value of 0
+	// is ignored, and will not be used to configure the API client's default retryer,
+	// or modify a per-operation call's retry max attempts. When creating a new API
+	// client, this member will only be used if the Retryer Options member is nil.
+	// This value will be ignored if Retryer is not nil. If specified in an operation
+	// call's functional options with a value that is different from the constructed
+	// client's Options, the Client's Retryer will be wrapped to use the operation's
+	// specific RetryMaxAttempts value.
+	RetryMaxAttempts int
+
+	// RetryMode specifies the retry mode the API client will be created with, if the
+	// Retryer option is not also specified. When creating a new API client, this
+	// member will only be used if the Retryer Options member is nil. This value will
+	// be ignored if Retryer is not nil. Currently does not support per-operation call
+	// overrides, but may in the future.
+	RetryMode aws.RetryMode
+
+	// Retryer guides how HTTP requests should be retried in case of recoverable
+	// failures. When nil, the API client will use a default retryer. The kind of
+	// default retryer created by the API client can be changed with the RetryMode
+	// option.
+	Retryer aws.Retryer
+
+	// The RuntimeEnvironment configuration, only populated if the DefaultsMode is set
+	// to DefaultsModeAuto and is initialized using config.LoadDefaultConfig. You
+	// should not populate this structure programmatically, or rely on the values here
+	// within your applications.
+	RuntimeEnvironment aws.RuntimeEnvironment
+
+	// The initial DefaultsMode used when the client options were constructed. If the
+	// DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved
+	// value was at that point in time. Currently does not support per-operation call
+	// overrides, but may in the future.
+	resolvedDefaultsMode aws.DefaultsMode
+
+	// The HTTP client to invoke API calls with. Defaults to client's default HTTP
+	// implementation if nil.
+	HTTPClient HTTPClient
+}
+
+// WithAPIOptions returns a functional option for setting the Client's APIOptions
+// option.
+func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) {
+	return func(o *Options) {
+		o.APIOptions = append(o.APIOptions, optFns...)
+	}
+}
+
+// WithEndpointResolver returns a functional option for setting the Client's
+// EndpointResolver option.
+func WithEndpointResolver(v EndpointResolver) func(*Options) {
+	return func(o *Options) {
+		o.EndpointResolver = v
+	}
+}
+
+type HTTPClient interface {
+	Do(*http.Request) (*http.Response, error)
+}
+
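A brief hypothetical sketch of how a caller might exercise these options; the cfg value and the operation input are placeholders, and this is an editor's illustration rather than generated code.

    // Construct the client with adaptive retries and a larger attempt budget.
    client := managedblockchainquery.NewFromConfig(cfg, func(o *managedblockchainquery.Options) {
        o.RetryMode = aws.RetryModeAdaptive
        o.RetryMaxAttempts = 5
    })

    // Override the retry budget for a single call; per the contract above, the
    // client's Retryer is wrapped with the operation-specific RetryMaxAttempts.
    out, err := client.BatchGetTokenBalance(ctx, input, // input is a placeholder *BatchGetTokenBalanceInput
        func(o *managedblockchainquery.Options) { o.RetryMaxAttempts = 2 },
    )
    _, _ = out, err

+// Copy creates a clone where the APIOptions list is deep copied.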
+func (o Options) Copy() Options { + to := o + to.APIOptions = make([]func(*middleware.Stack) error, len(o.APIOptions)) + copy(to.APIOptions, o.APIOptions) + + return to +} +func (c *Client) invokeOperation(ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error) (result interface{}, metadata middleware.Metadata, err error) { + ctx = middleware.ClearStackValues(ctx) + stack := middleware.NewStack(opID, smithyhttp.NewStackRequest) + options := c.options.Copy() + for _, fn := range optFns { + fn(&options) + } + + finalizeRetryMaxAttemptOptions(&options, *c) + + finalizeClientEndpointResolverOptions(&options) + + for _, fn := range stackFns { + if err := fn(stack, options); err != nil { + return nil, metadata, err + } + } + + for _, fn := range options.APIOptions { + if err := fn(stack); err != nil { + return nil, metadata, err + } + } + + handler := middleware.DecorateHandler(smithyhttp.NewClientHandler(options.HTTPClient), stack) + result, metadata, err = handler.Handle(ctx, params) + if err != nil { + err = &smithy.OperationError{ + ServiceID: ServiceID, + OperationName: opID, + Err: err, + } + } + return result, metadata, err +} + +type noSmithyDocumentSerde = smithydocument.NoSerde + +func resolveDefaultLogger(o *Options) { + if o.Logger != nil { + return + } + o.Logger = logging.Nop{} +} + +func addSetLoggerMiddleware(stack *middleware.Stack, o Options) error { + return middleware.AddSetLoggerMiddleware(stack, o.Logger) +} + +func setResolvedDefaultsMode(o *Options) { + if len(o.resolvedDefaultsMode) > 0 { + return + } + + var mode aws.DefaultsMode + mode.SetFromString(string(o.DefaultsMode)) + + if mode == aws.DefaultsModeAuto { + mode = defaults.ResolveDefaultsModeAuto(o.Region, o.RuntimeEnvironment) + } + + o.resolvedDefaultsMode = mode +} + +// NewFromConfig returns a new client from the provided config. +func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client { + opts := Options{ + Region: cfg.Region, + DefaultsMode: cfg.DefaultsMode, + RuntimeEnvironment: cfg.RuntimeEnvironment, + HTTPClient: cfg.HTTPClient, + Credentials: cfg.Credentials, + APIOptions: cfg.APIOptions, + Logger: cfg.Logger, + ClientLogMode: cfg.ClientLogMode, + AppID: cfg.AppID, + } + resolveAWSRetryerProvider(cfg, &opts) + resolveAWSRetryMaxAttempts(cfg, &opts) + resolveAWSRetryMode(cfg, &opts) + resolveAWSEndpointResolver(cfg, &opts) + resolveUseDualStackEndpoint(cfg, &opts) + resolveUseFIPSEndpoint(cfg, &opts) + return New(opts, optFns...) 
+} + +func resolveHTTPClient(o *Options) { + var buildable *awshttp.BuildableClient + + if o.HTTPClient != nil { + var ok bool + buildable, ok = o.HTTPClient.(*awshttp.BuildableClient) + if !ok { + return + } + } else { + buildable = awshttp.NewBuildableClient() + } + + modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode) + if err == nil { + buildable = buildable.WithDialerOptions(func(dialer *net.Dialer) { + if dialerTimeout, ok := modeConfig.GetConnectTimeout(); ok { + dialer.Timeout = dialerTimeout + } + }) + + buildable = buildable.WithTransportOptions(func(transport *http.Transport) { + if tlsHandshakeTimeout, ok := modeConfig.GetTLSNegotiationTimeout(); ok { + transport.TLSHandshakeTimeout = tlsHandshakeTimeout + } + }) + } + + o.HTTPClient = buildable +} + +func resolveRetryer(o *Options) { + if o.Retryer != nil { + return + } + + if len(o.RetryMode) == 0 { + modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode) + if err == nil { + o.RetryMode = modeConfig.RetryMode + } + } + if len(o.RetryMode) == 0 { + o.RetryMode = aws.RetryModeStandard + } + + var standardOptions []func(*retry.StandardOptions) + if v := o.RetryMaxAttempts; v != 0 { + standardOptions = append(standardOptions, func(so *retry.StandardOptions) { + so.MaxAttempts = v + }) + } + + switch o.RetryMode { + case aws.RetryModeAdaptive: + var adaptiveOptions []func(*retry.AdaptiveModeOptions) + if len(standardOptions) != 0 { + adaptiveOptions = append(adaptiveOptions, func(ao *retry.AdaptiveModeOptions) { + ao.StandardOptions = append(ao.StandardOptions, standardOptions...) + }) + } + o.Retryer = retry.NewAdaptiveMode(adaptiveOptions...) + + default: + o.Retryer = retry.NewStandard(standardOptions...) + } +} + +func resolveAWSRetryerProvider(cfg aws.Config, o *Options) { + if cfg.Retryer == nil { + return + } + o.Retryer = cfg.Retryer() +} + +func resolveAWSRetryMode(cfg aws.Config, o *Options) { + if len(cfg.RetryMode) == 0 { + return + } + o.RetryMode = cfg.RetryMode +} +func resolveAWSRetryMaxAttempts(cfg aws.Config, o *Options) { + if cfg.RetryMaxAttempts == 0 { + return + } + o.RetryMaxAttempts = cfg.RetryMaxAttempts +} + +func finalizeRetryMaxAttemptOptions(o *Options, client Client) { + if v := o.RetryMaxAttempts; v == 0 || v == client.options.RetryMaxAttempts { + return + } + + o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts) +} + +func resolveAWSEndpointResolver(cfg aws.Config, o *Options) { + if cfg.EndpointResolver == nil && cfg.EndpointResolverWithOptions == nil { + return + } + o.EndpointResolver = withEndpointResolver(cfg.EndpointResolver, cfg.EndpointResolverWithOptions, NewDefaultEndpointResolver()) +} + +func addClientUserAgent(stack *middleware.Stack, options Options) error { + if err := awsmiddleware.AddSDKAgentKeyValue(awsmiddleware.APIMetadata, "managedblockchainquery", goModuleVersion)(stack); err != nil { + return err + } + + if len(options.AppID) > 0 { + return awsmiddleware.AddSDKAgentKey(awsmiddleware.ApplicationIdentifier, options.AppID)(stack) + } + + return nil +} + +func addHTTPSignerV4Middleware(stack *middleware.Stack, o Options) error { + mw := v4.NewSignHTTPRequestMiddleware(v4.SignHTTPRequestMiddlewareOptions{ + CredentialsProvider: o.Credentials, + Signer: o.HTTPSignerV4, + LogSigning: o.ClientLogMode.IsSigning(), + }) + return stack.Finalize.Add(mw, middleware.After) +} + +type HTTPSignerV4 interface { + SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, 
signingTime time.Time, optFns ...func(*v4.SignerOptions)) error +} + +func resolveHTTPSignerV4(o *Options) { + if o.HTTPSignerV4 != nil { + return + } + o.HTTPSignerV4 = newDefaultV4Signer(*o) +} + +func newDefaultV4Signer(o Options) *v4.Signer { + return v4.NewSigner(func(so *v4.SignerOptions) { + so.Logger = o.Logger + so.LogSigning = o.ClientLogMode.IsSigning() + }) +} + +func addRetryMiddlewares(stack *middleware.Stack, o Options) error { + mo := retry.AddRetryMiddlewaresOptions{ + Retryer: o.Retryer, + LogRetryAttempts: o.ClientLogMode.IsRetries(), + } + return retry.AddRetryMiddlewares(stack, mo) +} + +// resolves dual-stack endpoint configuration +func resolveUseDualStackEndpoint(cfg aws.Config, o *Options) error { + if len(cfg.ConfigSources) == 0 { + return nil + } + value, found, err := internalConfig.ResolveUseDualStackEndpoint(context.Background(), cfg.ConfigSources) + if err != nil { + return err + } + if found { + o.EndpointOptions.UseDualStackEndpoint = value + } + return nil +} + +// resolves FIPS endpoint configuration +func resolveUseFIPSEndpoint(cfg aws.Config, o *Options) error { + if len(cfg.ConfigSources) == 0 { + return nil + } + value, found, err := internalConfig.ResolveUseFIPSEndpoint(context.Background(), cfg.ConfigSources) + if err != nil { + return err + } + if found { + o.EndpointOptions.UseFIPSEndpoint = value + } + return nil +} + +func addRequestIDRetrieverMiddleware(stack *middleware.Stack) error { + return awsmiddleware.AddRequestIDRetrieverMiddleware(stack) +} + +func addResponseErrorMiddleware(stack *middleware.Stack) error { + return awshttp.AddResponseErrorMiddleware(stack) +} + +func addRequestResponseLogging(stack *middleware.Stack, o Options) error { + return stack.Deserialize.Add(&smithyhttp.RequestResponseLogger{ + LogRequest: o.ClientLogMode.IsRequest(), + LogRequestWithBody: o.ClientLogMode.IsRequestWithBody(), + LogResponse: o.ClientLogMode.IsResponse(), + LogResponseWithBody: o.ClientLogMode.IsResponseWithBody(), + }, middleware.After) +} diff --git a/service/managedblockchainquery/api_client_test.go b/service/managedblockchainquery/api_client_test.go new file mode 100644 index 00000000000..e7988c7e6fa --- /dev/null +++ b/service/managedblockchainquery/api_client_test.go @@ -0,0 +1,123 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
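The invokeOperation path above applies per-call functional options to a copy of the client's Options (via Options.Copy), so request-scoped overrides never mutate the shared client. A minimal usage sketch of that contract; the config.LoadDefaultConfig bootstrap, the override value, and the nil input are illustrative assumptions, not part of this diff:

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/managedblockchainquery"
)

func main() {
	// One shared client; its options are resolved once at construction.
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := managedblockchainquery.NewFromConfig(cfg)

	// Per-call override: invokeOperation applies this function to a copy of
	// the options, so only this request gets the larger retry budget.
	_, err = client.GetTransaction(context.TODO(), nil, func(o *managedblockchainquery.Options) {
		o.RetryMaxAttempts = 5
	})
	if err != nil {
		// A nil input fails the operation's validation middleware; the call
		// is shown only for the option-override shape.
		log.Println(err)
	}
}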
+ +package managedblockchainquery + +import ( + "context" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "io/ioutil" + "net/http" + "strings" + "testing" +) + +func TestClient_resolveRetryOptions(t *testing.T) { + nopClient := smithyhttp.ClientDoFunc(func(_ *http.Request) (*http.Response, error) { + return &http.Response{ + StatusCode: 200, + Header: http.Header{}, + Body: ioutil.NopCloser(strings.NewReader("")), + }, nil + }) + + cases := map[string]struct { + defaultsMode aws.DefaultsMode + retryer aws.Retryer + retryMaxAttempts int + opRetryMaxAttempts *int + retryMode aws.RetryMode + expectClientRetryMode aws.RetryMode + expectClientMaxAttempts int + expectOpMaxAttempts int + }{ + "defaults": { + defaultsMode: aws.DefaultsModeStandard, + expectClientRetryMode: aws.RetryModeStandard, + expectClientMaxAttempts: 3, + expectOpMaxAttempts: 3, + }, + "custom default retry": { + retryMode: aws.RetryModeAdaptive, + retryMaxAttempts: 10, + expectClientRetryMode: aws.RetryModeAdaptive, + expectClientMaxAttempts: 10, + expectOpMaxAttempts: 10, + }, + "custom op max attempts": { + retryMode: aws.RetryModeAdaptive, + retryMaxAttempts: 10, + opRetryMaxAttempts: aws.Int(2), + expectClientRetryMode: aws.RetryModeAdaptive, + expectClientMaxAttempts: 10, + expectOpMaxAttempts: 2, + }, + "custom op no change max attempts": { + retryMode: aws.RetryModeAdaptive, + retryMaxAttempts: 10, + opRetryMaxAttempts: aws.Int(10), + expectClientRetryMode: aws.RetryModeAdaptive, + expectClientMaxAttempts: 10, + expectOpMaxAttempts: 10, + }, + "custom op 0 max attempts": { + retryMode: aws.RetryModeAdaptive, + retryMaxAttempts: 10, + opRetryMaxAttempts: aws.Int(0), + expectClientRetryMode: aws.RetryModeAdaptive, + expectClientMaxAttempts: 10, + expectOpMaxAttempts: 10, + }, + } + + for name, c := range cases { + t.Run(name, func(t *testing.T) { + client := NewFromConfig(aws.Config{ + DefaultsMode: c.defaultsMode, + Retryer: func() func() aws.Retryer { + if c.retryer == nil { + return nil + } + + return func() aws.Retryer { return c.retryer } + }(), + HTTPClient: nopClient, + RetryMaxAttempts: c.retryMaxAttempts, + RetryMode: c.retryMode, + }) + + if e, a := c.expectClientRetryMode, client.options.RetryMode; e != a { + t.Errorf("expect %v retry mode, got %v", e, a) + } + if e, a := c.expectClientMaxAttempts, client.options.Retryer.MaxAttempts(); e != a { + t.Errorf("expect %v max attempts, got %v", e, a) + } + + _, _, err := client.invokeOperation(context.Background(), "mockOperation", struct{}{}, + []func(*Options){ + func(o *Options) { + if c.opRetryMaxAttempts == nil { + return + } + o.RetryMaxAttempts = *c.opRetryMaxAttempts + }, + }, + func(s *middleware.Stack, o Options) error { + s.Initialize.Clear() + s.Serialize.Clear() + s.Build.Clear() + s.Finalize.Clear() + s.Deserialize.Clear() + + if e, a := c.expectOpMaxAttempts, o.Retryer.MaxAttempts(); e != a { + t.Errorf("expect %v op max attempts, got %v", e, a) + } + return nil + }) + if err != nil { + t.Fatalf("expect no operation error, got %v", err) + } + }) + } +} diff --git a/service/managedblockchainquery/api_op_BatchGetTokenBalance.go b/service/managedblockchainquery/api_op_BatchGetTokenBalance.go new file mode 100644 index 00000000000..a5dda642943 --- /dev/null +++ b/service/managedblockchainquery/api_op_BatchGetTokenBalance.go @@ -0,0 +1,131 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
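The table-driven test above pins down the retry resolution order: an explicit cfg.Retryer constructor wins outright; otherwise RetryMode selects the standard or adaptive retryer and RetryMaxAttempts caps attempts, first at client construction and again per operation via finalizeRetryMaxAttemptOptions. A sketch of the same precedence from the caller's side, with illustrative values (credentials and required input fields are omitted, so the call itself would error):

package main

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/managedblockchainquery"
)

func main() {
	// Client level: adaptive mode with a 10-attempt budget, the same shape as
	// the "custom default retry" test case.
	client := managedblockchainquery.NewFromConfig(aws.Config{
		Region:           "us-east-1",
		RetryMode:        aws.RetryModeAdaptive,
		RetryMaxAttempts: 10,
	})

	// Operation level: the retryer is rewrapped with a 2-attempt cap for this
	// call only, mirroring the "custom op max attempts" test case.
	_, _ = client.BatchGetTokenBalance(context.TODO(), nil, func(o *managedblockchainquery.Options) {
		o.RetryMaxAttempts = 2
	})
}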
+
+package managedblockchainquery
+
+import (
+	"context"
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+	"github.com/aws/aws-sdk-go-v2/service/managedblockchainquery/types"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Gets the token balance for a batch of tokens by using the GetTokenBalance
+// action for every token in the request. Only the native tokens BTC and ETH, and
+// the ERC-20, ERC-721, and ERC-1155 token standards are supported.
+func (c *Client) BatchGetTokenBalance(ctx context.Context, params *BatchGetTokenBalanceInput, optFns ...func(*Options)) (*BatchGetTokenBalanceOutput, error) {
+	if params == nil {
+		params = &BatchGetTokenBalanceInput{}
+	}
+
+	result, metadata, err := c.invokeOperation(ctx, "BatchGetTokenBalance", params, optFns, c.addOperationBatchGetTokenBalanceMiddlewares)
+	if err != nil {
+		return nil, err
+	}
+
+	out := result.(*BatchGetTokenBalanceOutput)
+	out.ResultMetadata = metadata
+	return out, nil
+}
+
+type BatchGetTokenBalanceInput struct {
+
+	// An array of GetTokenBalanceInput objects whose balances are being requested.
+	GetTokenBalanceInputs []types.BatchGetTokenBalanceInputItem
+
+	noSmithyDocumentSerde
+}
+
+type BatchGetTokenBalanceOutput struct {
+
+	// An array of BatchGetTokenBalanceErrorItem objects returned from the request.
+	//
+	// This member is required.
+	Errors []types.BatchGetTokenBalanceErrorItem
+
+	// An array of BatchGetTokenBalanceOutputItem objects returned in the response.
+	//
+	// This member is required.
+	TokenBalances []types.BatchGetTokenBalanceOutputItem
+
+	// Metadata pertaining to the operation's result.
+	ResultMetadata middleware.Metadata
+
+	noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationBatchGetTokenBalanceMiddlewares(stack *middleware.Stack, options Options) (err error) {
+	err = stack.Serialize.Add(&awsRestjson1_serializeOpBatchGetTokenBalance{}, middleware.After)
+	if err != nil {
+		return err
+	}
+	err = stack.Deserialize.Add(&awsRestjson1_deserializeOpBatchGetTokenBalance{}, middleware.After)
+	if err != nil {
+		return err
+	}
+	if err = addSetLoggerMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addResolveEndpointMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+		return err
+	}
+	if err = addRetryMiddlewares(stack, options); err != nil {
+		return err
+	}
+	if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+		return err
+	}
+	if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+		return err
+	}
+	if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+		return err
+	}
+	if err = addClientUserAgent(stack, options); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addOpBatchGetTokenBalanceValidationMiddleware(stack); err != nil {
+		return err
+	}
+	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opBatchGetTokenBalance(options.Region), middleware.Before); err != nil {
+		return err
+	}
+	if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
+		return err
+	}
+	if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addResponseErrorMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addRequestResponseLogging(stack, options); err != nil {
+		return err
+	}
+	return nil
+}
+
+func newServiceMetadataMiddleware_opBatchGetTokenBalance(region string) *awsmiddleware.RegisterServiceMetadata {
+	return &awsmiddleware.RegisterServiceMetadata{
+		Region:        region,
+		ServiceID:     ServiceID,
+		SigningName:   "managedblockchain-query",
+		OperationName: "BatchGetTokenBalance",
+	}
+}
diff --git a/service/managedblockchainquery/api_op_GetTokenBalance.go b/service/managedblockchainquery/api_op_GetTokenBalance.go
new file mode 100644
index 00000000000..ba0ee140687
--- /dev/null
+++ b/service/managedblockchainquery/api_op_GetTokenBalance.go
@@ -0,0 +1,155 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package managedblockchainquery
+
+import (
+	"context"
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+	"github.com/aws/aws-sdk-go-v2/service/managedblockchainquery/types"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Gets the balance of a specific token, including native tokens, for a given
+// address (wallet or contract) on the blockchain. Only the native tokens BTC and
+// ETH, and the ERC-20, ERC-721, and ERC-1155 token standards are supported.
+func (c *Client) GetTokenBalance(ctx context.Context, params *GetTokenBalanceInput, optFns ...func(*Options)) (*GetTokenBalanceOutput, error) {
+	if params == nil {
+		params = &GetTokenBalanceInput{}
+	}
+
+	result, metadata, err := c.invokeOperation(ctx, "GetTokenBalance", params, optFns, c.addOperationGetTokenBalanceMiddlewares)
+	if err != nil {
+		return nil, err
+	}
+
+	out := result.(*GetTokenBalanceOutput)
+	out.ResultMetadata = metadata
+	return out, nil
+}
+
+type GetTokenBalanceInput struct {
+
+	// The container for the identifier for the owner.
+	//
+	// This member is required.
+	OwnerIdentifier *types.OwnerIdentifier
+
+	// The container for the identifier for the token, including the unique token ID
+	// and its blockchain network.
+	//
+	// This member is required.
+	TokenIdentifier *types.TokenIdentifier
+
+	// The time for which the TokenBalance is requested, or the current time if a
+	// time is not provided in the request. This time is recorded only up to the
+	// second.
+	AtBlockchainInstant *types.BlockchainInstant
+
+	noSmithyDocumentSerde
+}
+
+type GetTokenBalanceOutput struct {
+
+	// The container for time.
+	//
+	// This member is required.
+	AtBlockchainInstant *types.BlockchainInstant
+
+	// The container for the token balance.
+	//
+	// This member is required.
+	Balance *string
+
+	// The container for time.
+	LastUpdatedTime *types.BlockchainInstant
+
+	// The container for the identifier of the owner.
+	OwnerIdentifier *types.OwnerIdentifier
+
+	// The container for the identifier for the token, including the unique token ID
+	// and its blockchain network. Only the native tokens BTC and ETH, and the
+	// ERC-20, ERC-721, and ERC-1155 token standards are supported.
+	TokenIdentifier *types.TokenIdentifier
+
+	// Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetTokenBalanceMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestjson1_serializeOpGetTokenBalance{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpGetTokenBalance{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpGetTokenBalanceValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetTokenBalance(options.Region), middleware.Before); err != nil { + return err + } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetTokenBalance(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "managedblockchain-query", + OperationName: "GetTokenBalance", + } +} diff --git a/service/managedblockchainquery/api_op_GetTransaction.go b/service/managedblockchainquery/api_op_GetTransaction.go new file mode 100644 index 00000000000..ac8a9447234 --- /dev/null +++ b/service/managedblockchainquery/api_op_GetTransaction.go @@ -0,0 +1,132 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package managedblockchainquery + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/managedblockchainquery/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Get the details of a transaction. 
+func (c *Client) GetTransaction(ctx context.Context, params *GetTransactionInput, optFns ...func(*Options)) (*GetTransactionOutput, error) { + if params == nil { + params = &GetTransactionInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetTransaction", params, optFns, c.addOperationGetTransactionMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetTransactionOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetTransactionInput struct { + + // The blockchain network where the transaction occurred. + // + // This member is required. + Network types.QueryNetwork + + // The hash of the transaction. It is generated whenever a transaction is verified + // and added to the blockchain. + // + // This member is required. + TransactionHash *string + + noSmithyDocumentSerde +} + +type GetTransactionOutput struct { + + // Contains the details of the transaction. + // + // This member is required. + Transaction *types.Transaction + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetTransactionMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestjson1_serializeOpGetTransaction{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpGetTransaction{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpGetTransactionValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetTransaction(options.Region), middleware.Before); err != nil { + return err + } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetTransaction(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "managedblockchain-query", + OperationName: "GetTransaction", + } +} diff --git a/service/managedblockchainquery/api_op_ListTokenBalances.go b/service/managedblockchainquery/api_op_ListTokenBalances.go new file mode 100644 index 
00000000000..4e8567d95a6
--- /dev/null
+++ b/service/managedblockchainquery/api_op_ListTokenBalances.go
@@ -0,0 +1,242 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package managedblockchainquery
+
+import (
+	"context"
+	"fmt"
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+	"github.com/aws/aws-sdk-go-v2/service/managedblockchainquery/types"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// This action returns the following for a given blockchain network:
+//   - Lists all token balances owned by an address (either a contract address or
+//     a wallet address).
+//   - Lists all token balances for all tokens created by a contract.
+//   - Lists all token balances for a given token.
+//
+// You must always specify the network property of the tokenFilter when using this
+// operation.
+func (c *Client) ListTokenBalances(ctx context.Context, params *ListTokenBalancesInput, optFns ...func(*Options)) (*ListTokenBalancesOutput, error) {
+	if params == nil {
+		params = &ListTokenBalancesInput{}
+	}
+
+	result, metadata, err := c.invokeOperation(ctx, "ListTokenBalances", params, optFns, c.addOperationListTokenBalancesMiddlewares)
+	if err != nil {
+		return nil, err
+	}
+
+	out := result.(*ListTokenBalancesOutput)
+	out.ResultMetadata = metadata
+	return out, nil
+}
+
+type ListTokenBalancesInput struct {
+
+	// The contract address or a token identifier on the blockchain network by which
+	// to filter the request. You must specify the contractAddress property of this
+	// container when listing tokens minted by a contract. You must always specify the
+	// network property of this container when using this operation.
+	//
+	// This member is required.
+	TokenFilter *types.TokenFilter
+
+	// The maximum number of token balances to return.
+	MaxResults *int32
+
+	// The pagination token that indicates the next set of results to retrieve.
+	NextToken *string
+
+	// The contract or wallet address on the blockchain network by which to filter the
+	// request. You must specify the address property of the ownerFilter when listing
+	// balances of tokens owned by the address.
+	OwnerFilter *types.OwnerFilter
+
+	noSmithyDocumentSerde
+}
+
+type ListTokenBalancesOutput struct {
+
+	// An array of TokenBalance objects. Each object contains details about the token
+	// balance.
+	//
+	// This member is required.
+	TokenBalances []types.TokenBalance
+
+	// The pagination token that indicates the next set of results to retrieve.
+	NextToken *string
+
+	// Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListTokenBalancesMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestjson1_serializeOpListTokenBalances{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpListTokenBalances{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpListTokenBalancesValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListTokenBalances(options.Region), middleware.Before); err != nil { + return err + } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +// ListTokenBalancesAPIClient is a client that implements the ListTokenBalances +// operation. +type ListTokenBalancesAPIClient interface { + ListTokenBalances(context.Context, *ListTokenBalancesInput, ...func(*Options)) (*ListTokenBalancesOutput, error) +} + +var _ ListTokenBalancesAPIClient = (*Client)(nil) + +// ListTokenBalancesPaginatorOptions is the paginator options for ListTokenBalances +type ListTokenBalancesPaginatorOptions struct { + // The maximum number of token balances to return. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. 
+	StopOnDuplicateToken bool
+}
+
+// ListTokenBalancesPaginator is a paginator for ListTokenBalances
+type ListTokenBalancesPaginator struct {
+	options   ListTokenBalancesPaginatorOptions
+	client    ListTokenBalancesAPIClient
+	params    *ListTokenBalancesInput
+	nextToken *string
+	firstPage bool
+}
+
+// NewListTokenBalancesPaginator returns a new ListTokenBalancesPaginator
+func NewListTokenBalancesPaginator(client ListTokenBalancesAPIClient, params *ListTokenBalancesInput, optFns ...func(*ListTokenBalancesPaginatorOptions)) *ListTokenBalancesPaginator {
+	if params == nil {
+		params = &ListTokenBalancesInput{}
+	}
+
+	options := ListTokenBalancesPaginatorOptions{}
+	if params.MaxResults != nil {
+		options.Limit = *params.MaxResults
+	}
+
+	for _, fn := range optFns {
+		fn(&options)
+	}
+
+	return &ListTokenBalancesPaginator{
+		options:   options,
+		client:    client,
+		params:    params,
+		firstPage: true,
+		nextToken: params.NextToken,
+	}
+}
+
+// HasMorePages returns a boolean indicating whether more pages are available
+func (p *ListTokenBalancesPaginator) HasMorePages() bool {
+	return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0)
+}
+
+// NextPage retrieves the next ListTokenBalances page.
+func (p *ListTokenBalancesPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListTokenBalancesOutput, error) {
+	if !p.HasMorePages() {
+		return nil, fmt.Errorf("no more pages available")
+	}
+
+	params := *p.params
+	params.NextToken = p.nextToken
+
+	var limit *int32
+	if p.options.Limit > 0 {
+		limit = &p.options.Limit
+	}
+	params.MaxResults = limit
+
+	result, err := p.client.ListTokenBalances(ctx, &params, optFns...)
+	if err != nil {
+		return nil, err
+	}
+	p.firstPage = false
+
+	prevToken := p.nextToken
+	p.nextToken = result.NextToken
+
+	if p.options.StopOnDuplicateToken &&
+		prevToken != nil &&
+		p.nextToken != nil &&
+		*prevToken == *p.nextToken {
+		p.nextToken = nil
+	}
+
+	return result, nil
+}
+
+func newServiceMetadataMiddleware_opListTokenBalances(region string) *awsmiddleware.RegisterServiceMetadata {
+	return &awsmiddleware.RegisterServiceMetadata{
+		Region:        region,
+		ServiceID:     ServiceID,
+		SigningName:   "managedblockchain-query",
+		OperationName: "ListTokenBalances",
+	}
+}
diff --git a/service/managedblockchainquery/api_op_ListTransactionEvents.go b/service/managedblockchainquery/api_op_ListTransactionEvents.go
new file mode 100644
index 00000000000..8984fe0ca3d
--- /dev/null
+++ b/service/managedblockchainquery/api_op_ListTransactionEvents.go
@@ -0,0 +1,243 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package managedblockchainquery
+
+import (
+	"context"
+	"fmt"
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+	"github.com/aws/aws-sdk-go-v2/service/managedblockchainquery/types"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Lists all the transaction events for a transaction. The response is an array
+// of TransactionEvent objects. Each object contains details about one
+// transaction event.
+func (c *Client) ListTransactionEvents(ctx context.Context, params *ListTransactionEventsInput, optFns ...func(*Options)) (*ListTransactionEventsOutput, error) {
+	if params == nil {
+		params = &ListTransactionEventsInput{}
+	}
+
+	result, metadata, err := c.invokeOperation(ctx, "ListTransactionEvents", params, optFns, c.addOperationListTransactionEventsMiddlewares)
+	if err != nil {
+		return nil, err
+	}
+
+	out := result.(*ListTransactionEventsOutput)
+	out.ResultMetadata = metadata
+	return out, nil
+}
+
+type ListTransactionEventsInput struct {
+
+	// The blockchain network where the transaction events occurred.
+	//
+	// This member is required.
+	Network types.QueryNetwork
+
+	// The hash of the transaction. It is generated whenever a transaction is verified
+	// and added to the blockchain.
+	//
+	// This member is required.
+	TransactionHash *string
+
+	// The maximum number of transaction events to list. Even if additional results
+	// can be retrieved, the request can return fewer results than maxResults or an
+	// empty array of results. To retrieve the next set of results, make another
+	// request with the returned nextToken value. The value of nextToken is null when
+	// there are no more results to return.
+	MaxResults *int32
+
+	// The pagination token that indicates the next set of results to retrieve.
+	NextToken *string
+
+	noSmithyDocumentSerde
+}
+
+type ListTransactionEventsOutput struct {
+
+	// An array of TransactionEvent objects. Each object contains details about the
+	// transaction events.
+	//
+	// This member is required.
+	Events []types.TransactionEvent
+
+	// The pagination token that indicates the next set of results to retrieve.
+	NextToken *string
+
+	// Metadata pertaining to the operation's result.
+	ResultMetadata middleware.Metadata
+
+	noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationListTransactionEventsMiddlewares(stack *middleware.Stack, options Options) (err error) {
+	err = stack.Serialize.Add(&awsRestjson1_serializeOpListTransactionEvents{}, middleware.After)
+	if err != nil {
+		return err
+	}
+	err = stack.Deserialize.Add(&awsRestjson1_deserializeOpListTransactionEvents{}, middleware.After)
+	if err != nil {
+		return err
+	}
+	if err = addSetLoggerMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addResolveEndpointMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+		return err
+	}
+	if err = addRetryMiddlewares(stack, options); err != nil {
+		return err
+	}
+	if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+		return err
+	}
+	if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+		return err
+	}
+	if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+		return err
+	}
+	if err = addClientUserAgent(stack, options); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addOpListTransactionEventsValidationMiddleware(stack); err != nil {
+		return err
+	}
+	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListTransactionEvents(options.Region), middleware.Before); err != nil {
+		return err
+	}
+	if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
+		return err
+	}
+	if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addResponseErrorMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addRequestResponseLogging(stack, options); err != nil {
+		return err
+	}
+	return nil
+}
+
+// ListTransactionEventsAPIClient is a client that implements the
+// ListTransactionEvents operation.
+type ListTransactionEventsAPIClient interface {
+	ListTransactionEvents(context.Context, *ListTransactionEventsInput, ...func(*Options)) (*ListTransactionEventsOutput, error)
+}
+
+var _ ListTransactionEventsAPIClient = (*Client)(nil)
+
+// ListTransactionEventsPaginatorOptions is the paginator options for
+// ListTransactionEvents
+type ListTransactionEventsPaginatorOptions struct {
+	// The maximum number of transaction events to list. Even if additional results
+	// can be retrieved, the request can return fewer results than maxResults or an
+	// empty array of results. To retrieve the next set of results, make another
+	// request with the returned nextToken value. The value of nextToken is null when
+	// there are no more results to return.
+	Limit int32

+	// Set to true if pagination should stop if the service returns a pagination token
+	// that matches the most recent token provided to the service.
+	StopOnDuplicateToken bool
+}
+
+// ListTransactionEventsPaginator is a paginator for ListTransactionEvents
+type ListTransactionEventsPaginator struct {
+	options   ListTransactionEventsPaginatorOptions
+	client    ListTransactionEventsAPIClient
+	params    *ListTransactionEventsInput
+	nextToken *string
+	firstPage bool
+}
+
+// NewListTransactionEventsPaginator returns a new ListTransactionEventsPaginator
+func NewListTransactionEventsPaginator(client ListTransactionEventsAPIClient, params *ListTransactionEventsInput, optFns ...func(*ListTransactionEventsPaginatorOptions)) *ListTransactionEventsPaginator {
+	if params == nil {
+		params = &ListTransactionEventsInput{}
+	}
+
+	options := ListTransactionEventsPaginatorOptions{}
+	if params.MaxResults != nil {
+		options.Limit = *params.MaxResults
+	}
+
+	for _, fn := range optFns {
+		fn(&options)
+	}
+
+	return &ListTransactionEventsPaginator{
+		options:   options,
+		client:    client,
+		params:    params,
+		firstPage: true,
+		nextToken: params.NextToken,
+	}
+}
+
+// HasMorePages returns a boolean indicating whether more pages are available
+func (p *ListTransactionEventsPaginator) HasMorePages() bool {
+	return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0)
+}
+
+// NextPage retrieves the next ListTransactionEvents page.
+func (p *ListTransactionEventsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListTransactionEventsOutput, error) {
+	if !p.HasMorePages() {
+		return nil, fmt.Errorf("no more pages available")
+	}
+
+	params := *p.params
+	params.NextToken = p.nextToken
+
+	var limit *int32
+	if p.options.Limit > 0 {
+		limit = &p.options.Limit
+	}
+	params.MaxResults = limit
+
+	result, err := p.client.ListTransactionEvents(ctx, &params, optFns...)
+	if err != nil {
+		return nil, err
+	}
+	p.firstPage = false
+
+	prevToken := p.nextToken
+	p.nextToken = result.NextToken
+
+	if p.options.StopOnDuplicateToken &&
+		prevToken != nil &&
+		p.nextToken != nil &&
+		*prevToken == *p.nextToken {
+		p.nextToken = nil
+	}
+
+	return result, nil
+}
+
+func newServiceMetadataMiddleware_opListTransactionEvents(region string) *awsmiddleware.RegisterServiceMetadata {
+	return &awsmiddleware.RegisterServiceMetadata{
+		Region:        region,
+		ServiceID:     ServiceID,
+		SigningName:   "managedblockchain-query",
+		OperationName: "ListTransactionEvents",
+	}
+}
diff --git a/service/managedblockchainquery/api_op_ListTransactions.go b/service/managedblockchainquery/api_op_ListTransactions.go
new file mode 100644
index 00000000000..7bd4e5ac561
--- /dev/null
+++ b/service/managedblockchainquery/api_op_ListTransactions.go
@@ -0,0 +1,251 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package managedblockchainquery
+
+import (
+	"context"
+	"fmt"
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+	"github.com/aws/aws-sdk-go-v2/service/managedblockchainquery/types"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Lists all of the transactions on a given wallet address or to a specific
+// contract.
+func (c *Client) ListTransactions(ctx context.Context, params *ListTransactionsInput, optFns ...func(*Options)) (*ListTransactionsOutput, error) {
+	if params == nil {
+		params = &ListTransactionsInput{}
+	}
+
+	result, metadata, err := c.invokeOperation(ctx, "ListTransactions", params, optFns, c.addOperationListTransactionsMiddlewares)
+	if err != nil {
+		return nil, err
+	}
+
+	out := result.(*ListTransactionsOutput)
+	out.ResultMetadata = metadata
+	return out, nil
+}
+
+type ListTransactionsInput struct {
+
+	// The address (either a contract or wallet) whose transactions are being
+	// requested.
+	//
+	// This member is required.
+	Address *string
+
+	// The blockchain network where the transactions occurred.
+	//
+	// This member is required.
+	Network types.QueryNetwork
+
+	// The container for time.
+	FromBlockchainInstant *types.BlockchainInstant
+
+	// The maximum number of transactions to list. Even if additional results can be
+	// retrieved, the request can return fewer results than maxResults or an empty
+	// array of results. To retrieve the next set of results, make another request with
+	// the returned nextToken value. The value of nextToken is null when there are no
+	// more results to return.
+	MaxResults *int32
+
+	// The pagination token that indicates the next set of results to retrieve.
+	NextToken *string
+
+	// Sorts items in ascending order if the first page starts at fromTime. Sorts
+	// items in descending order if the first page starts at toTime.
+	Sort *types.ListTransactionsSort
+
+	// The container for time.
+	ToBlockchainInstant *types.BlockchainInstant
+
+	noSmithyDocumentSerde
+}
+
+type ListTransactionsOutput struct {
+
+	// The array of transactions returned by the request.
+	//
+	// This member is required.
+	Transactions []types.TransactionOutputItem
+
+	// The pagination token that indicates the next set of results to retrieve.
+	NextToken *string
+
+	// Metadata pertaining to the operation's result.
+	ResultMetadata middleware.Metadata
+
+	noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationListTransactionsMiddlewares(stack *middleware.Stack, options Options) (err error) {
+	err = stack.Serialize.Add(&awsRestjson1_serializeOpListTransactions{}, middleware.After)
+	if err != nil {
+		return err
+	}
+	err = stack.Deserialize.Add(&awsRestjson1_deserializeOpListTransactions{}, middleware.After)
+	if err != nil {
+		return err
+	}
+	if err = addSetLoggerMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addResolveEndpointMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+		return err
+	}
+	if err = addRetryMiddlewares(stack, options); err != nil {
+		return err
+	}
+	if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+		return err
+	}
+	if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+		return err
+	}
+	if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+		return err
+	}
+	if err = addClientUserAgent(stack, options); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addOpListTransactionsValidationMiddleware(stack); err != nil {
+		return err
+	}
+	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListTransactions(options.Region), middleware.Before); err != nil {
+		return err
+	}
+	if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
+		return err
+	}
+	if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addResponseErrorMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addRequestResponseLogging(stack, options); err != nil {
+		return err
+	}
+	return nil
+}
+
+// ListTransactionsAPIClient is a client that implements the ListTransactions
+// operation.
+type ListTransactionsAPIClient interface {
+	ListTransactions(context.Context, *ListTransactionsInput, ...func(*Options)) (*ListTransactionsOutput, error)
+}
+
+var _ ListTransactionsAPIClient = (*Client)(nil)
+
+// ListTransactionsPaginatorOptions is the paginator options for ListTransactions
+type ListTransactionsPaginatorOptions struct {
+	// The maximum number of transactions to list. Even if additional results can be
+	// retrieved, the request can return fewer results than maxResults or an empty
+	// array of results. To retrieve the next set of results, make another request with
+	// the returned nextToken value. The value of nextToken is null when there are no
+	// more results to return.
+	Limit int32
+
+	// Set to true if pagination should stop if the service returns a pagination token
+	// that matches the most recent token provided to the service.
+ StopOnDuplicateToken bool +} + +// ListTransactionsPaginator is a paginator for ListTransactions +type ListTransactionsPaginator struct { + options ListTransactionsPaginatorOptions + client ListTransactionsAPIClient + params *ListTransactionsInput + nextToken *string + firstPage bool +} + +// NewListTransactionsPaginator returns a new ListTransactionsPaginator +func NewListTransactionsPaginator(client ListTransactionsAPIClient, params *ListTransactionsInput, optFns ...func(*ListTransactionsPaginatorOptions)) *ListTransactionsPaginator { + if params == nil { + params = &ListTransactionsInput{} + } + + options := ListTransactionsPaginatorOptions{} + if params.MaxResults != nil { + options.Limit = *params.MaxResults + } + + for _, fn := range optFns { + fn(&options) + } + + return &ListTransactionsPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.NextToken, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *ListTransactionsPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) +} + +// NextPage retrieves the next ListTransactions page. +func (p *ListTransactionsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListTransactionsOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.NextToken = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.MaxResults = limit + + result, err := p.client.ListTransactions(ctx, ¶ms, optFns...) + if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.NextToken + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +func newServiceMetadataMiddleware_opListTransactions(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "managedblockchain-query", + OperationName: "ListTransactions", + } +} diff --git a/service/managedblockchainquery/deserializers.go b/service/managedblockchainquery/deserializers.go new file mode 100644 index 00000000000..915cda343a3 --- /dev/null +++ b/service/managedblockchainquery/deserializers.go @@ -0,0 +1,2650 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
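The deserializers that follow map any non-2xx REST-JSON response to a typed error from this module's types package, keyed first by the X-Amzn-ErrorType header and then by the error code in the body, with smithy.GenericAPIError as the fallback. A hedged sketch of how a caller would branch on those types; the region value, empty input, and logging policy are illustrative assumptions:

package main

import (
	"context"
	"errors"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/managedblockchainquery"
	"github.com/aws/aws-sdk-go-v2/service/managedblockchainquery/types"
)

func main() {
	client := managedblockchainquery.NewFromConfig(aws.Config{Region: "us-east-1"})

	_, err := client.GetTokenBalance(context.TODO(), &managedblockchainquery.GetTokenBalanceInput{})
	if err != nil {
		var notFound *types.ResourceNotFoundException
		var throttled *types.ThrottlingException
		switch {
		case errors.As(err, &notFound):
			// The owner/token pair does not exist on the requested network.
			log.Println("not found:", notFound.ErrorMessage())
		case errors.As(err, &throttled):
			// Still throttled after the configured retries were exhausted.
			log.Println("throttled:", throttled.ErrorMessage())
		default:
			// Unmodeled service error or transport-level failure.
			log.Println(err)
		}
	}
}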
+ +package managedblockchainquery + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws/protocol/restjson" + "github.com/aws/aws-sdk-go-v2/service/managedblockchainquery/types" + smithy "github.com/aws/smithy-go" + smithyio "github.com/aws/smithy-go/io" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithytime "github.com/aws/smithy-go/time" + smithyhttp "github.com/aws/smithy-go/transport/http" + "io" + "strconv" + "strings" +) + +type awsRestjson1_deserializeOpBatchGetTokenBalance struct { +} + +func (*awsRestjson1_deserializeOpBatchGetTokenBalance) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpBatchGetTokenBalance) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorBatchGetTokenBalance(response, &metadata) + } + output := &BatchGetTokenBalanceOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeOpDocumentBatchGetTokenBalanceOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorBatchGetTokenBalance(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + 
errorMessage = message + } + + switch { + case strings.EqualFold("AccessDeniedException", errorCode): + return awsRestjson1_deserializeErrorAccessDeniedException(response, errorBody) + + case strings.EqualFold("InternalServerException", errorCode): + return awsRestjson1_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsRestjson1_deserializeErrorResourceNotFoundException(response, errorBody) + + case strings.EqualFold("ServiceQuotaExceededException", errorCode): + return awsRestjson1_deserializeErrorServiceQuotaExceededException(response, errorBody) + + case strings.EqualFold("ThrottlingException", errorCode): + return awsRestjson1_deserializeErrorThrottlingException(response, errorBody) + + case strings.EqualFold("ValidationException", errorCode): + return awsRestjson1_deserializeErrorValidationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeOpDocumentBatchGetTokenBalanceOutput(v **BatchGetTokenBalanceOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *BatchGetTokenBalanceOutput + if *v == nil { + sv = &BatchGetTokenBalanceOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "errors": + if err := awsRestjson1_deserializeDocumentBatchGetTokenBalanceErrors(&sv.Errors, value); err != nil { + return err + } + + case "tokenBalances": + if err := awsRestjson1_deserializeDocumentBatchGetTokenBalanceOutputList(&sv.TokenBalances, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +type awsRestjson1_deserializeOpGetTokenBalance struct { +} + +func (*awsRestjson1_deserializeOpGetTokenBalance) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpGetTokenBalance) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorGetTokenBalance(response, &metadata) + } + output := &GetTokenBalanceOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeOpDocumentGetTokenBalanceOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, 
metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorGetTokenBalance(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("AccessDeniedException", errorCode): + return awsRestjson1_deserializeErrorAccessDeniedException(response, errorBody) + + case strings.EqualFold("InternalServerException", errorCode): + return awsRestjson1_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsRestjson1_deserializeErrorResourceNotFoundException(response, errorBody) + + case strings.EqualFold("ServiceQuotaExceededException", errorCode): + return awsRestjson1_deserializeErrorServiceQuotaExceededException(response, errorBody) + + case strings.EqualFold("ThrottlingException", errorCode): + return awsRestjson1_deserializeErrorThrottlingException(response, errorBody) + + case strings.EqualFold("ValidationException", errorCode): + return awsRestjson1_deserializeErrorValidationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeOpDocumentGetTokenBalanceOutput(v **GetTokenBalanceOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *GetTokenBalanceOutput + if *v == nil { + sv = &GetTokenBalanceOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "atBlockchainInstant": + if err := awsRestjson1_deserializeDocumentBlockchainInstant(&sv.AtBlockchainInstant, value); err != nil { + return err + } + + case "balance": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Balance = ptr.String(jtv) + } + + case "lastUpdatedTime": + if err := awsRestjson1_deserializeDocumentBlockchainInstant(&sv.LastUpdatedTime, value); err != nil { + return err + } + + case "ownerIdentifier": + if err := 
awsRestjson1_deserializeDocumentOwnerIdentifier(&sv.OwnerIdentifier, value); err != nil { + return err + } + + case "tokenIdentifier": + if err := awsRestjson1_deserializeDocumentTokenIdentifier(&sv.TokenIdentifier, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +type awsRestjson1_deserializeOpGetTransaction struct { +} + +func (*awsRestjson1_deserializeOpGetTransaction) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpGetTransaction) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorGetTransaction(response, &metadata) + } + output := &GetTransactionOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeOpDocumentGetTransactionOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorGetTransaction(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("AccessDeniedException", errorCode): + return awsRestjson1_deserializeErrorAccessDeniedException(response, errorBody) + + case 
strings.EqualFold("InternalServerException", errorCode): + return awsRestjson1_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsRestjson1_deserializeErrorResourceNotFoundException(response, errorBody) + + case strings.EqualFold("ServiceQuotaExceededException", errorCode): + return awsRestjson1_deserializeErrorServiceQuotaExceededException(response, errorBody) + + case strings.EqualFold("ThrottlingException", errorCode): + return awsRestjson1_deserializeErrorThrottlingException(response, errorBody) + + case strings.EqualFold("ValidationException", errorCode): + return awsRestjson1_deserializeErrorValidationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeOpDocumentGetTransactionOutput(v **GetTransactionOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *GetTransactionOutput + if *v == nil { + sv = &GetTransactionOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "transaction": + if err := awsRestjson1_deserializeDocumentTransaction(&sv.Transaction, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +type awsRestjson1_deserializeOpListTokenBalances struct { +} + +func (*awsRestjson1_deserializeOpListTokenBalances) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpListTokenBalances) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorListTokenBalances(response, &metadata) + } + output := &ListTokenBalancesOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeOpDocumentListTokenBalancesOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorListTokenBalances(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := 
io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("AccessDeniedException", errorCode): + return awsRestjson1_deserializeErrorAccessDeniedException(response, errorBody) + + case strings.EqualFold("InternalServerException", errorCode): + return awsRestjson1_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("ServiceQuotaExceededException", errorCode): + return awsRestjson1_deserializeErrorServiceQuotaExceededException(response, errorBody) + + case strings.EqualFold("ThrottlingException", errorCode): + return awsRestjson1_deserializeErrorThrottlingException(response, errorBody) + + case strings.EqualFold("ValidationException", errorCode): + return awsRestjson1_deserializeErrorValidationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeOpDocumentListTokenBalancesOutput(v **ListTokenBalancesOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ListTokenBalancesOutput + if *v == nil { + sv = &ListTokenBalancesOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "nextToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected NextToken to be of type string, got %T instead", value) + } + sv.NextToken = ptr.String(jtv) + } + + case "tokenBalances": + if err := awsRestjson1_deserializeDocumentTokenBalanceList(&sv.TokenBalances, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +type awsRestjson1_deserializeOpListTransactionEvents struct { +} + +func (*awsRestjson1_deserializeOpListTransactionEvents) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpListTransactionEvents) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: 
fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorListTransactionEvents(response, &metadata) + } + output := &ListTransactionEventsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeOpDocumentListTransactionEventsOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorListTransactionEvents(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("AccessDeniedException", errorCode): + return awsRestjson1_deserializeErrorAccessDeniedException(response, errorBody) + + case strings.EqualFold("InternalServerException", errorCode): + return awsRestjson1_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("ServiceQuotaExceededException", errorCode): + return awsRestjson1_deserializeErrorServiceQuotaExceededException(response, errorBody) + + case strings.EqualFold("ThrottlingException", errorCode): + return awsRestjson1_deserializeErrorThrottlingException(response, errorBody) + + case strings.EqualFold("ValidationException", errorCode): + return awsRestjson1_deserializeErrorValidationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeOpDocumentListTransactionEventsOutput(v **ListTransactionEventsOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := 
value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ListTransactionEventsOutput + if *v == nil { + sv = &ListTransactionEventsOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "events": + if err := awsRestjson1_deserializeDocumentTransactionEventList(&sv.Events, value); err != nil { + return err + } + + case "nextToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected NextToken to be of type string, got %T instead", value) + } + sv.NextToken = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +type awsRestjson1_deserializeOpListTransactions struct { +} + +func (*awsRestjson1_deserializeOpListTransactions) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpListTransactions) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorListTransactions(response, &metadata) + } + output := &ListTransactionsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeOpDocumentListTransactionsOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorListTransactions(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, 
io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("AccessDeniedException", errorCode): + return awsRestjson1_deserializeErrorAccessDeniedException(response, errorBody) + + case strings.EqualFold("InternalServerException", errorCode): + return awsRestjson1_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("ServiceQuotaExceededException", errorCode): + return awsRestjson1_deserializeErrorServiceQuotaExceededException(response, errorBody) + + case strings.EqualFold("ThrottlingException", errorCode): + return awsRestjson1_deserializeErrorThrottlingException(response, errorBody) + + case strings.EqualFold("ValidationException", errorCode): + return awsRestjson1_deserializeErrorValidationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeOpDocumentListTransactionsOutput(v **ListTransactionsOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ListTransactionsOutput + if *v == nil { + sv = &ListTransactionsOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "nextToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected NextToken to be of type string, got %T instead", value) + } + sv.NextToken = ptr.String(jtv) + } + + case "transactions": + if err := awsRestjson1_deserializeDocumentTransactionOutputList(&sv.Transactions, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeOpHttpBindingsInternalServerException(v *types.InternalServerException, response *smithyhttp.Response) error { + if v == nil { + return fmt.Errorf("unsupported deserialization for nil %T", v) + } + + if headerValues := response.Header.Values("Retry-After"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + vv, err := strconv.ParseInt(headerValues[0], 0, 32) + if err != nil { + return err + } + v.RetryAfterSeconds = ptr.Int32(int32(vv)) + } + + return nil +} +func awsRestjson1_deserializeOpHttpBindingsThrottlingException(v *types.ThrottlingException, response *smithyhttp.Response) error { + if v == nil { + return fmt.Errorf("unsupported deserialization for nil %T", v) + } + + if headerValues := response.Header.Values("Retry-After"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + vv, err := strconv.ParseInt(headerValues[0], 0, 32) + if err != nil { + return err + } + v.RetryAfterSeconds = ptr.Int32(int32(vv)) + } + + return nil +} +func awsRestjson1_deserializeErrorAccessDeniedException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.AccessDeniedException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ 
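+ // Wrap the decode failure and attach a snapshot of the most recently read response bytes for diagnostics.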
+ Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentAccessDeniedException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorInternalServerException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.InternalServerException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentInternalServerException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + if err := awsRestjson1_deserializeOpHttpBindingsInternalServerException(output, response); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response error with invalid HTTP bindings, %w", err)} + } + + return output +} + +func awsRestjson1_deserializeErrorResourceNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.ResourceNotFoundException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentResourceNotFoundException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorServiceQuotaExceededException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.ServiceQuotaExceededException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentServiceQuotaExceededException(&output, shape) + + if err != nil { + var snapshot 
bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorThrottlingException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.ThrottlingException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentThrottlingException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + if err := awsRestjson1_deserializeOpHttpBindingsThrottlingException(output, response); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response error with invalid HTTP bindings, %w", err)} + } + + return output +} + +func awsRestjson1_deserializeErrorValidationException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.ValidationException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentValidationException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeDocumentAccessDeniedException(v **types.AccessDeniedException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.AccessDeniedException + if *v == nil { + sv = &types.AccessDeniedException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentBatchGetTokenBalanceErrorItem(v **types.BatchGetTokenBalanceErrorItem, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + 
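+ // Structure shapes must decode from a JSON object; assert the underlying type before walking the members.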
shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.BatchGetTokenBalanceErrorItem + if *v == nil { + sv = &types.BatchGetTokenBalanceErrorItem{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "atBlockchainInstant": + if err := awsRestjson1_deserializeDocumentBlockchainInstant(&sv.AtBlockchainInstant, value); err != nil { + return err + } + + case "errorCode": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.ErrorCode = ptr.String(jtv) + } + + case "errorMessage": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.ErrorMessage = ptr.String(jtv) + } + + case "errorType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorType to be of type string, got %T instead", value) + } + sv.ErrorType = types.ErrorType(jtv) + } + + case "ownerIdentifier": + if err := awsRestjson1_deserializeDocumentOwnerIdentifier(&sv.OwnerIdentifier, value); err != nil { + return err + } + + case "tokenIdentifier": + if err := awsRestjson1_deserializeDocumentTokenIdentifier(&sv.TokenIdentifier, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentBatchGetTokenBalanceErrors(v *[]types.BatchGetTokenBalanceErrorItem, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.BatchGetTokenBalanceErrorItem + if *v == nil { + cv = []types.BatchGetTokenBalanceErrorItem{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.BatchGetTokenBalanceErrorItem + destAddr := &col + if err := awsRestjson1_deserializeDocumentBatchGetTokenBalanceErrorItem(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsRestjson1_deserializeDocumentBatchGetTokenBalanceOutputItem(v **types.BatchGetTokenBalanceOutputItem, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.BatchGetTokenBalanceOutputItem + if *v == nil { + sv = &types.BatchGetTokenBalanceOutputItem{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "atBlockchainInstant": + if err := awsRestjson1_deserializeDocumentBlockchainInstant(&sv.AtBlockchainInstant, value); err != nil { + return err + } + + case "balance": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Balance = ptr.String(jtv) + } + + case "lastUpdatedTime": + if err := awsRestjson1_deserializeDocumentBlockchainInstant(&sv.LastUpdatedTime, value); err != nil { + return err + } + + case "ownerIdentifier": + if err := awsRestjson1_deserializeDocumentOwnerIdentifier(&sv.OwnerIdentifier, value); err != nil { + return err + } + + case "tokenIdentifier": + if err := awsRestjson1_deserializeDocumentTokenIdentifier(&sv.TokenIdentifier, 
value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentBatchGetTokenBalanceOutputList(v *[]types.BatchGetTokenBalanceOutputItem, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.BatchGetTokenBalanceOutputItem + if *v == nil { + cv = []types.BatchGetTokenBalanceOutputItem{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.BatchGetTokenBalanceOutputItem + destAddr := &col + if err := awsRestjson1_deserializeDocumentBatchGetTokenBalanceOutputItem(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsRestjson1_deserializeDocumentBlockchainInstant(v **types.BlockchainInstant, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.BlockchainInstant + if *v == nil { + sv = &types.BlockchainInstant{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "time": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.Time = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Timestamp to be a JSON Number, got %T instead", value) + + } + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentInternalServerException(v **types.InternalServerException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InternalServerException + if *v == nil { + sv = &types.InternalServerException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + case "retryAfterSeconds": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Integer to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.RetryAfterSeconds = ptr.Int32(int32(i64)) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentOwnerIdentifier(v **types.OwnerIdentifier, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.OwnerIdentifier + if *v == nil { + sv = &types.OwnerIdentifier{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "address": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ChainAddress to be of type string, got %T instead", value) + } + 
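+ // The wire type was verified above, so the value can be stored as the owner's chain address.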
sv.Address = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentResourceNotFoundException(v **types.ResourceNotFoundException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ResourceNotFoundException + if *v == nil { + sv = &types.ResourceNotFoundException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + case "resourceId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ResourceId to be of type string, got %T instead", value) + } + sv.ResourceId = ptr.String(jtv) + } + + case "resourceType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ResourceType to be of type string, got %T instead", value) + } + sv.ResourceType = types.ResourceType(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentServiceQuotaExceededException(v **types.ServiceQuotaExceededException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ServiceQuotaExceededException + if *v == nil { + sv = &types.ServiceQuotaExceededException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + case "quotaCode": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected QuotaCode to be of type string, got %T instead", value) + } + sv.QuotaCode = ptr.String(jtv) + } + + case "resourceId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ResourceId to be of type string, got %T instead", value) + } + sv.ResourceId = ptr.String(jtv) + } + + case "resourceType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ResourceType to be of type string, got %T instead", value) + } + sv.ResourceType = types.ResourceType(jtv) + } + + case "serviceCode": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ServiceCode to be of type string, got %T instead", value) + } + sv.ServiceCode = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentThrottlingException(v **types.ThrottlingException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ThrottlingException + if *v == nil { + sv = &types.ThrottlingException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + 
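+ // Known members map onto typed fields; unrecognized members fall through to the default case and are ignored.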
case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + case "quotaCode": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected QuotaCode to be of type string, got %T instead", value) + } + sv.QuotaCode = ptr.String(jtv) + } + + case "retryAfterSeconds": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Integer to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.RetryAfterSeconds = ptr.Int32(int32(i64)) + } + + case "serviceCode": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ServiceCode to be of type string, got %T instead", value) + } + sv.ServiceCode = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentTokenBalance(v **types.TokenBalance, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.TokenBalance + if *v == nil { + sv = &types.TokenBalance{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "atBlockchainInstant": + if err := awsRestjson1_deserializeDocumentBlockchainInstant(&sv.AtBlockchainInstant, value); err != nil { + return err + } + + case "balance": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Balance = ptr.String(jtv) + } + + case "lastUpdatedTime": + if err := awsRestjson1_deserializeDocumentBlockchainInstant(&sv.LastUpdatedTime, value); err != nil { + return err + } + + case "ownerIdentifier": + if err := awsRestjson1_deserializeDocumentOwnerIdentifier(&sv.OwnerIdentifier, value); err != nil { + return err + } + + case "tokenIdentifier": + if err := awsRestjson1_deserializeDocumentTokenIdentifier(&sv.TokenIdentifier, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentTokenBalanceList(v *[]types.TokenBalance, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.TokenBalance + if *v == nil { + cv = []types.TokenBalance{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.TokenBalance + destAddr := &col + if err := awsRestjson1_deserializeDocumentTokenBalance(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsRestjson1_deserializeDocumentTokenIdentifier(v **types.TokenIdentifier, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.TokenIdentifier + if *v == nil { + sv = &types.TokenIdentifier{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "contractAddress": + 
if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ChainAddress to be of type string, got %T instead", value) + } + sv.ContractAddress = ptr.String(jtv) + } + + case "network": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected QueryNetwork to be of type string, got %T instead", value) + } + sv.Network = types.QueryNetwork(jtv) + } + + case "tokenId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected QueryTokenId to be of type string, got %T instead", value) + } + sv.TokenId = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentTransaction(v **types.Transaction, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.Transaction + if *v == nil { + sv = &types.Transaction{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "blockHash": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected BlockHash to be of type string, got %T instead", value) + } + sv.BlockHash = ptr.String(jtv) + } + + case "blockNumber": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.BlockNumber = ptr.String(jtv) + } + + case "contractAddress": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ChainAddress to be of type string, got %T instead", value) + } + sv.ContractAddress = ptr.String(jtv) + } + + case "cumulativeGasUsed": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.CumulativeGasUsed = ptr.String(jtv) + } + + case "effectiveGasPrice": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.EffectiveGasPrice = ptr.String(jtv) + } + + case "from": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ChainAddress to be of type string, got %T instead", value) + } + sv.From = ptr.String(jtv) + } + + case "gasUsed": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.GasUsed = ptr.String(jtv) + } + + case "network": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected QueryNetwork to be of type string, got %T instead", value) + } + sv.Network = types.QueryNetwork(jtv) + } + + case "numberOfTransactions": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Long to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.NumberOfTransactions = ptr.Int64(i64) + } + + case "signatureR": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.SignatureR = ptr.String(jtv) + } + + case "signatureS": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.SignatureS = 
ptr.String(jtv) + } + + case "signatureV": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Integer to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.SignatureV = ptr.Int32(int32(i64)) + } + + case "status": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected QueryTransactionStatus to be of type string, got %T instead", value) + } + sv.Status = types.QueryTransactionStatus(jtv) + } + + case "to": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ChainAddress to be of type string, got %T instead", value) + } + sv.To = ptr.String(jtv) + } + + case "transactionFee": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.TransactionFee = ptr.String(jtv) + } + + case "transactionHash": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected QueryTransactionHash to be of type string, got %T instead", value) + } + sv.TransactionHash = ptr.String(jtv) + } + + case "transactionId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.TransactionId = ptr.String(jtv) + } + + case "transactionIndex": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Long to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.TransactionIndex = ptr.Int64(i64) + } + + case "transactionTimestamp": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.TransactionTimestamp = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Timestamp to be a JSON Number, got %T instead", value) + + } + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentTransactionEvent(v **types.TransactionEvent, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.TransactionEvent + if *v == nil { + sv = &types.TransactionEvent{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "contractAddress": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ChainAddress to be of type string, got %T instead", value) + } + sv.ContractAddress = ptr.String(jtv) + } + + case "eventType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected QueryTransactionEventType to be of type string, got %T instead", value) + } + sv.EventType = types.QueryTransactionEventType(jtv) + } + + case "from": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ChainAddress to be of type string, got %T instead", value) + } + sv.From = ptr.String(jtv) + } + + case "network": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected QueryNetwork to be of type string, got %T instead", value) + } + sv.Network = types.QueryNetwork(jtv) + } + + case "to": + if value != nil { + jtv, ok := value.(string) + if !ok { + return 
fmt.Errorf("expected ChainAddress to be of type string, got %T instead", value) + } + sv.To = ptr.String(jtv) + } + + case "tokenId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected QueryTokenId to be of type string, got %T instead", value) + } + sv.TokenId = ptr.String(jtv) + } + + case "transactionHash": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected QueryTransactionHash to be of type string, got %T instead", value) + } + sv.TransactionHash = ptr.String(jtv) + } + + case "transactionId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.TransactionId = ptr.String(jtv) + } + + case "value": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Value = ptr.String(jtv) + } + + case "voutIndex": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Integer to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.VoutIndex = ptr.Int32(int32(i64)) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentTransactionEventList(v *[]types.TransactionEvent, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.TransactionEvent + if *v == nil { + cv = []types.TransactionEvent{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.TransactionEvent + destAddr := &col + if err := awsRestjson1_deserializeDocumentTransactionEvent(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsRestjson1_deserializeDocumentTransactionOutputItem(v **types.TransactionOutputItem, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.TransactionOutputItem + if *v == nil { + sv = &types.TransactionOutputItem{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "network": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected QueryNetwork to be of type string, got %T instead", value) + } + sv.Network = types.QueryNetwork(jtv) + } + + case "transactionHash": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected QueryTransactionHash to be of type string, got %T instead", value) + } + sv.TransactionHash = ptr.String(jtv) + } + + case "transactionTimestamp": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.TransactionTimestamp = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Timestamp to be a JSON Number, got %T instead", value) + + } + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentTransactionOutputList(v *[]types.TransactionOutputItem, value interface{}) error { + if v == nil { + return 
fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.TransactionOutputItem + if *v == nil { + cv = []types.TransactionOutputItem{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.TransactionOutputItem + destAddr := &col + if err := awsRestjson1_deserializeDocumentTransactionOutputItem(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsRestjson1_deserializeDocumentValidationException(v **types.ValidationException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ValidationException + if *v == nil { + sv = &types.ValidationException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "fieldList": + if err := awsRestjson1_deserializeDocumentValidationExceptionFieldList(&sv.FieldList, value); err != nil { + return err + } + + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + case "reason": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ValidationExceptionReason to be of type string, got %T instead", value) + } + sv.Reason = types.ValidationExceptionReason(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentValidationExceptionField(v **types.ValidationExceptionField, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ValidationExceptionField + if *v == nil { + sv = &types.ValidationExceptionField{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + case "name": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Name = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentValidationExceptionFieldList(v *[]types.ValidationExceptionField, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.ValidationExceptionField + if *v == nil { + cv = []types.ValidationExceptionField{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.ValidationExceptionField + destAddr := &col + if err := awsRestjson1_deserializeDocumentValidationExceptionField(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} diff --git 
a/service/managedblockchainquery/doc.go b/service/managedblockchainquery/doc.go new file mode 100644 index 00000000000..d05d6587d59 --- /dev/null +++ b/service/managedblockchainquery/doc.go @@ -0,0 +1,15 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +// Package managedblockchainquery provides the API client, operations, and +// parameter types for Amazon Managed Blockchain Query. +// +// Amazon Managed Blockchain (AMB) Query provides you with convenient access to +// multi-blockchain network data, which makes it easier for you to extract +// contextual data related to blockchain activity. You can use AMB Query to read +// data from public blockchain networks, such as Bitcoin Mainnet and Ethereum +// Mainnet. You can also get information such as the current and historical +// balances of addresses, or you can get a list of blockchain transactions for a +// given time period. Additionally, you can get details of a given transaction, +// such as transaction events, which you can further analyze or use in business +// logic for your applications. +package managedblockchainquery diff --git a/service/managedblockchainquery/endpoints.go b/service/managedblockchainquery/endpoints.go new file mode 100644 index 00000000000..fbe87fb802c --- /dev/null +++ b/service/managedblockchainquery/endpoints.go @@ -0,0 +1,200 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package managedblockchainquery + +import ( + "context" + "errors" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + internalendpoints "github.com/aws/aws-sdk-go-v2/service/managedblockchainquery/internal/endpoints" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "net/url" + "strings" +) + +// EndpointResolverOptions is the service endpoint resolver options +type EndpointResolverOptions = internalendpoints.Options + +// EndpointResolver interface for resolving service endpoints. +type EndpointResolver interface { + ResolveEndpoint(region string, options EndpointResolverOptions) (aws.Endpoint, error) +} + +var _ EndpointResolver = &internalendpoints.Resolver{} + +// NewDefaultEndpointResolver constructs a new service endpoint resolver +func NewDefaultEndpointResolver() *internalendpoints.Resolver { + return internalendpoints.New() +} + +// EndpointResolverFunc is a helper utility that wraps a function so it satisfies +// the EndpointResolver interface. This is useful when you want to add additional +// endpoint resolving logic, or stub out specific endpoints with custom values. +type EndpointResolverFunc func(region string, options EndpointResolverOptions) (aws.Endpoint, error) + +func (fn EndpointResolverFunc) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) { + return fn(region, options) +} + +func resolveDefaultEndpointConfiguration(o *Options) { + if o.EndpointResolver != nil { + return + } + o.EndpointResolver = NewDefaultEndpointResolver() +} + +// EndpointResolverFromURL returns an EndpointResolver configured using the +// provided endpoint URL. By default, the resolved endpoint resolver uses the +// client region as signing region, and the endpoint source is set to +// EndpointSourceCustom. You can provide functional options to configure endpoint +// values for the resolved endpoint.
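+//
+// As a minimal sketch, assuming the github.com/aws/aws-sdk-go-v2/config package
+// and a hypothetical local test endpoint, a client can be pointed at a fixed
+// URL with:
+//
+//	cfg, err := config.LoadDefaultConfig(context.TODO())
+//	if err != nil {
+//		// handle error
+//	}
+//	client := managedblockchainquery.NewFromConfig(cfg, func(o *managedblockchainquery.Options) {
+//		o.EndpointResolver = managedblockchainquery.EndpointResolverFromURL("http://localhost:8080")
+//	})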
+func EndpointResolverFromURL(url string, optFns ...func(*aws.Endpoint)) EndpointResolver { + e := aws.Endpoint{URL: url, Source: aws.EndpointSourceCustom} + for _, fn := range optFns { + fn(&e) + } + + return EndpointResolverFunc( + func(region string, options EndpointResolverOptions) (aws.Endpoint, error) { + if len(e.SigningRegion) == 0 { + e.SigningRegion = region + } + return e, nil + }, + ) +} + +type ResolveEndpoint struct { + Resolver EndpointResolver + Options EndpointResolverOptions +} + +func (*ResolveEndpoint) ID() string { + return "ResolveEndpoint" +} + +func (m *ResolveEndpoint) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + if m.Resolver == nil { + return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") + } + + eo := m.Options + eo.Logger = middleware.GetLogger(ctx) + + var endpoint aws.Endpoint + endpoint, err = m.Resolver.ResolveEndpoint(awsmiddleware.GetRegion(ctx), eo) + if err != nil { + return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) + } + + req.URL, err = url.Parse(endpoint.URL) + if err != nil { + return out, metadata, fmt.Errorf("failed to parse endpoint URL: %w", err) + } + + if len(awsmiddleware.GetSigningName(ctx)) == 0 { + signingName := endpoint.SigningName + if len(signingName) == 0 { + signingName = "managedblockchain-query" + } + ctx = awsmiddleware.SetSigningName(ctx, signingName) + } + ctx = awsmiddleware.SetEndpointSource(ctx, endpoint.Source) + ctx = smithyhttp.SetHostnameImmutable(ctx, endpoint.HostnameImmutable) + ctx = awsmiddleware.SetSigningRegion(ctx, endpoint.SigningRegion) + ctx = awsmiddleware.SetPartitionID(ctx, endpoint.PartitionID) + return next.HandleSerialize(ctx, in) +} +func addResolveEndpointMiddleware(stack *middleware.Stack, o Options) error { + return stack.Serialize.Insert(&ResolveEndpoint{ + Resolver: o.EndpointResolver, + Options: o.EndpointOptions, + }, "OperationSerializer", middleware.Before) +} + +func removeResolveEndpointMiddleware(stack *middleware.Stack) error { + _, err := stack.Serialize.Remove((&ResolveEndpoint{}).ID()) + return err +} + +type wrappedEndpointResolver struct { + awsResolver aws.EndpointResolverWithOptions + resolver EndpointResolver +} + +func (w *wrappedEndpointResolver) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) { + if w.awsResolver == nil { + goto fallback + } + endpoint, err = w.awsResolver.ResolveEndpoint(ServiceID, region, options) + if err == nil { + return endpoint, nil + } + + if nf := (&aws.EndpointNotFoundError{}); !errors.As(err, &nf) { + return endpoint, err + } + +fallback: + if w.resolver == nil { + return endpoint, fmt.Errorf("default endpoint resolver provided was nil") + } + return w.resolver.ResolveEndpoint(region, options) +} + +type awsEndpointResolverAdaptor func(service, region string) (aws.Endpoint, error) + +func (a awsEndpointResolverAdaptor) ResolveEndpoint(service, region string, options ...interface{}) (aws.Endpoint, error) { + return a(service, region) +} + +var _ aws.EndpointResolverWithOptions = awsEndpointResolverAdaptor(nil) + +// withEndpointResolver returns an EndpointResolver that first delegates endpoint resolution to the awsResolver. 
+// If awsResolver returns an aws.EndpointNotFoundError, the resolver will use the provided +// fallbackResolver for resolution. +// +// fallbackResolver must not be nil +func withEndpointResolver(awsResolver aws.EndpointResolver, awsResolverWithOptions aws.EndpointResolverWithOptions, fallbackResolver EndpointResolver) EndpointResolver { + var resolver aws.EndpointResolverWithOptions + + if awsResolverWithOptions != nil { + resolver = awsResolverWithOptions + } else if awsResolver != nil { + resolver = awsEndpointResolverAdaptor(awsResolver.ResolveEndpoint) + } + + return &wrappedEndpointResolver{ + awsResolver: resolver, + resolver: fallbackResolver, + } +} + +func finalizeClientEndpointResolverOptions(options *Options) { + options.EndpointOptions.LogDeprecated = options.ClientLogMode.IsDeprecatedUsage() + + if len(options.EndpointOptions.ResolvedRegion) == 0 { + const fipsInfix = "-fips-" + const fipsPrefix = "fips-" + const fipsSuffix = "-fips" + + if strings.Contains(options.Region, fipsInfix) || + strings.Contains(options.Region, fipsPrefix) || + strings.Contains(options.Region, fipsSuffix) { + options.EndpointOptions.ResolvedRegion = strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll( + options.Region, fipsInfix, "-"), fipsPrefix, ""), fipsSuffix, "") + options.EndpointOptions.UseFIPSEndpoint = aws.FIPSEndpointStateEnabled + } + } + +} diff --git a/service/managedblockchainquery/generated.json b/service/managedblockchainquery/generated.json new file mode 100644 index 00000000000..e03a16eb864 --- /dev/null +++ b/service/managedblockchainquery/generated.json @@ -0,0 +1,33 @@ +{ + "dependencies": { + "github.com/aws/aws-sdk-go-v2": "v1.4.0", + "github.com/aws/aws-sdk-go-v2/internal/configsources": "v0.0.0-00010101000000-000000000000", + "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2": "v2.0.0-00010101000000-000000000000", + "github.com/aws/smithy-go": "v1.4.0" + }, + "files": [ + "api_client.go", + "api_client_test.go", + "api_op_BatchGetTokenBalance.go", + "api_op_GetTokenBalance.go", + "api_op_GetTransaction.go", + "api_op_ListTokenBalances.go", + "api_op_ListTransactionEvents.go", + "api_op_ListTransactions.go", + "deserializers.go", + "doc.go", + "endpoints.go", + "generated.json", + "internal/endpoints/endpoints.go", + "internal/endpoints/endpoints_test.go", + "protocol_test.go", + "serializers.go", + "types/enums.go", + "types/errors.go", + "types/types.go", + "validators.go" + ], + "go": "1.15", + "module": "github.com/aws/aws-sdk-go-v2/service/managedblockchainquery", + "unstable": false +} diff --git a/service/managedblockchainquery/go.mod b/service/managedblockchainquery/go.mod new file mode 100644 index 00000000000..9be39cccf56 --- /dev/null +++ b/service/managedblockchainquery/go.mod @@ -0,0 +1,16 @@ +module github.com/aws/aws-sdk-go-v2/service/managedblockchainquery + +go 1.15 + +require ( + github.com/aws/aws-sdk-go-v2 v1.19.0 + github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.35 + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.29 + github.com/aws/smithy-go v1.13.5 +) + +replace github.com/aws/aws-sdk-go-v2 => ../../ + +replace github.com/aws/aws-sdk-go-v2/internal/configsources => ../../internal/configsources/ + +replace github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 => ../../internal/endpoints/v2/ diff --git a/service/managedblockchainquery/go.sum b/service/managedblockchainquery/go.sum new file mode 100644 index 00000000000..6f859610e44 --- /dev/null +++ b/service/managedblockchainquery/go.sum @@ -0,0 +1,11 @@
+github.com/aws/smithy-go v1.13.5 h1:hgz0X/DX0dGqTYpGALqXJoRKRj5oQ7150i5FdTePzO8= +github.com/aws/smithy-go v1.13.5/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/service/managedblockchainquery/go_module_metadata.go b/service/managedblockchainquery/go_module_metadata.go new file mode 100644 index 00000000000..aca5471447a --- /dev/null +++ b/service/managedblockchainquery/go_module_metadata.go @@ -0,0 +1,6 @@ +// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. + +package managedblockchainquery + +// goModuleVersion is the tagged release for this module +const goModuleVersion = "tip" diff --git a/service/managedblockchainquery/internal/endpoints/endpoints.go b/service/managedblockchainquery/internal/endpoints/endpoints.go new file mode 100644 index 00000000000..68d27e398b1 --- /dev/null +++ b/service/managedblockchainquery/internal/endpoints/endpoints.go @@ -0,0 +1,296 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package endpoints + +import ( + "github.com/aws/aws-sdk-go-v2/aws" + endpoints "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2" + "github.com/aws/smithy-go/logging" + "regexp" +) + +// Options is the endpoint resolver configuration options +type Options struct { + // Logger is a logging implementation that log events should be sent to. + Logger logging.Logger + + // LogDeprecated indicates that deprecated endpoints should be logged to the + // provided logger. + LogDeprecated bool + + // ResolvedRegion is used to override the region to be resolved, rather than + // using the value passed to the ResolveEndpoint method. This value is used by the + // SDK to translate regions like fips-us-east-1 or us-east-1-fips to an alternative + // name. You must not set this value directly in your application. + ResolvedRegion string + + // DisableHTTPS informs the resolver to return an endpoint that does not use the + // HTTPS scheme. + DisableHTTPS bool + + // UseDualStackEndpoint specifies the resolver must resolve a dual-stack endpoint. + UseDualStackEndpoint aws.DualStackEndpointState + + // UseFIPSEndpoint specifies the resolver must resolve a FIPS endpoint.
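These options select among the variant hostnames in the partition table below; note also that finalizeClientEndpointResolverOptions in endpoints.go, earlier in this diff, normalizes pseudo-regions such as fips-us-east-1 into us-east-1 with UseFIPSEndpoint enabled. A sketch of exercising the resolver directly through the exported constructor, with imports as in the earlier sketches:

    resolver := managedblockchainquery.NewDefaultEndpointResolver()
    ep, err := resolver.ResolveEndpoint("us-east-1", managedblockchainquery.EndpointResolverOptions{
        UseFIPSEndpoint: aws.FIPSEndpointStateEnabled,
    })
    if err != nil {
        log.Fatal(err)
    }
    // With the aws-partition defaults below, this should print
    // https://managedblockchain-query-fips.us-east-1.amazonaws.com
    log.Println(ep.URL)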
+ UseFIPSEndpoint aws.FIPSEndpointState +} + +func (o Options) GetResolvedRegion() string { + return o.ResolvedRegion +} + +func (o Options) GetDisableHTTPS() bool { + return o.DisableHTTPS +} + +func (o Options) GetUseDualStackEndpoint() aws.DualStackEndpointState { + return o.UseDualStackEndpoint +} + +func (o Options) GetUseFIPSEndpoint() aws.FIPSEndpointState { + return o.UseFIPSEndpoint +} + +func transformToSharedOptions(options Options) endpoints.Options { + return endpoints.Options{ + Logger: options.Logger, + LogDeprecated: options.LogDeprecated, + ResolvedRegion: options.ResolvedRegion, + DisableHTTPS: options.DisableHTTPS, + UseDualStackEndpoint: options.UseDualStackEndpoint, + UseFIPSEndpoint: options.UseFIPSEndpoint, + } +} + +// Resolver ManagedBlockchain Query endpoint resolver +type Resolver struct { + partitions endpoints.Partitions +} + +// ResolveEndpoint resolves the service endpoint for the given region and options +func (r *Resolver) ResolveEndpoint(region string, options Options) (endpoint aws.Endpoint, err error) { + if len(region) == 0 { + return endpoint, &aws.MissingRegionError{} + } + + opt := transformToSharedOptions(options) + return r.partitions.ResolveEndpoint(region, opt) +} + +// New returns a new Resolver +func New() *Resolver { + return &Resolver{ + partitions: defaultPartitions, + } +} + +var partitionRegexp = struct { + Aws *regexp.Regexp + AwsCn *regexp.Regexp + AwsIso *regexp.Regexp + AwsIsoB *regexp.Regexp + AwsIsoE *regexp.Regexp + AwsIsoF *regexp.Regexp + AwsUsGov *regexp.Regexp +}{ + + Aws: regexp.MustCompile("^(us|eu|ap|sa|ca|me|af)\\-\\w+\\-\\d+$"), + AwsCn: regexp.MustCompile("^cn\\-\\w+\\-\\d+$"), + AwsIso: regexp.MustCompile("^us\\-iso\\-\\w+\\-\\d+$"), + AwsIsoB: regexp.MustCompile("^us\\-isob\\-\\w+\\-\\d+$"), + AwsIsoE: regexp.MustCompile("^eu\\-isoe\\-\\w+\\-\\d+$"), + AwsIsoF: regexp.MustCompile("^us\\-isof\\-\\w+\\-\\d+$"), + AwsUsGov: regexp.MustCompile("^us\\-gov\\-\\w+\\-\\d+$"), +} + +var defaultPartitions = endpoints.Partitions{ + { + ID: "aws", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.DualStackVariant, + }: { + Hostname: "managedblockchain-query.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "managedblockchain-query-fips.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "managedblockchain-query-fips.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "managedblockchain-query.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.Aws, + IsRegionalized: true, + }, + { + ID: "aws-cn", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.DualStackVariant, + }: { + Hostname: "managedblockchain-query.{region}.api.amazonwebservices.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "managedblockchain-query-fips.{region}.amazonaws.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "managedblockchain-query-fips.{region}.api.amazonwebservices.com.cn", + Protocols: 
[]string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "managedblockchain-query.{region}.amazonaws.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsCn, + IsRegionalized: true, + }, + { + ID: "aws-iso", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "managedblockchain-query-fips.{region}.c2s.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "managedblockchain-query.{region}.c2s.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIso, + IsRegionalized: true, + }, + { + ID: "aws-iso-b", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "managedblockchain-query-fips.{region}.sc2s.sgov.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "managedblockchain-query.{region}.sc2s.sgov.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIsoB, + IsRegionalized: true, + }, + { + ID: "aws-iso-e", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "managedblockchain-query-fips.{region}.cloud.adc-e.uk", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "managedblockchain-query.{region}.cloud.adc-e.uk", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIsoE, + IsRegionalized: true, + }, + { + ID: "aws-iso-f", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "managedblockchain-query-fips.{region}.csp.hci.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "managedblockchain-query.{region}.csp.hci.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIsoF, + IsRegionalized: true, + }, + { + ID: "aws-us-gov", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.DualStackVariant, + }: { + Hostname: "managedblockchain-query.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "managedblockchain-query-fips.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "managedblockchain-query-fips.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "managedblockchain-query.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsUsGov, + IsRegionalized: true, + }, +} diff --git a/service/managedblockchainquery/internal/endpoints/endpoints_test.go b/service/managedblockchainquery/internal/endpoints/endpoints_test.go new file mode 100644 index 00000000000..08e5da2d833 --- /dev/null +++ b/service/managedblockchainquery/internal/endpoints/endpoints_test.go @@ -0,0 +1,11 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package endpoints + +import ( + "testing" +) + +func TestRegexCompile(t *testing.T) { + _ = defaultPartitions +} diff --git a/service/managedblockchainquery/protocol_test.go b/service/managedblockchainquery/protocol_test.go new file mode 100644 index 00000000000..8f8c575e3e3 --- /dev/null +++ b/service/managedblockchainquery/protocol_test.go @@ -0,0 +1,3 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package managedblockchainquery diff --git a/service/managedblockchainquery/serializers.go b/service/managedblockchainquery/serializers.go new file mode 100644 index 00000000000..04ace155213 --- /dev/null +++ b/service/managedblockchainquery/serializers.go @@ -0,0 +1,655 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package managedblockchainquery + +import ( + "bytes" + "context" + "fmt" + "github.com/aws/aws-sdk-go-v2/service/managedblockchainquery/types" + smithy "github.com/aws/smithy-go" + "github.com/aws/smithy-go/encoding/httpbinding" + smithyjson "github.com/aws/smithy-go/encoding/json" + "github.com/aws/smithy-go/middleware" + smithytime "github.com/aws/smithy-go/time" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +type awsRestjson1_serializeOpBatchGetTokenBalance struct { +} + +func (*awsRestjson1_serializeOpBatchGetTokenBalance) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpBatchGetTokenBalance) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*BatchGetTokenBalanceInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/batch-get-token-balance") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "POST" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + restEncoder.SetHeader("Content-Type").String("application/json") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsRestjson1_serializeOpDocumentBatchGetTokenBalanceInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsBatchGetTokenBalanceInput(v *BatchGetTokenBalanceInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + return nil +} + +func awsRestjson1_serializeOpDocumentBatchGetTokenBalanceInput(v *BatchGetTokenBalanceInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.GetTokenBalanceInputs != nil { + ok := object.Key("getTokenBalanceInputs") + if 
err := awsRestjson1_serializeDocumentGetTokenBalanceInputList(v.GetTokenBalanceInputs, ok); err != nil { + return err + } + } + + return nil +} + +type awsRestjson1_serializeOpGetTokenBalance struct { +} + +func (*awsRestjson1_serializeOpGetTokenBalance) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpGetTokenBalance) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetTokenBalanceInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/get-token-balance") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "POST" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + restEncoder.SetHeader("Content-Type").String("application/json") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsRestjson1_serializeOpDocumentGetTokenBalanceInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsGetTokenBalanceInput(v *GetTokenBalanceInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + return nil +} + +func awsRestjson1_serializeOpDocumentGetTokenBalanceInput(v *GetTokenBalanceInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.AtBlockchainInstant != nil { + ok := object.Key("atBlockchainInstant") + if err := awsRestjson1_serializeDocumentBlockchainInstant(v.AtBlockchainInstant, ok); err != nil { + return err + } + } + + if v.OwnerIdentifier != nil { + ok := object.Key("ownerIdentifier") + if err := awsRestjson1_serializeDocumentOwnerIdentifier(v.OwnerIdentifier, ok); err != nil { + return err + } + } + + if v.TokenIdentifier != nil { + ok := object.Key("tokenIdentifier") + if err := awsRestjson1_serializeDocumentTokenIdentifier(v.TokenIdentifier, ok); err != nil { + return err + } + } + + return nil +} + +type awsRestjson1_serializeOpGetTransaction struct { +} + +func (*awsRestjson1_serializeOpGetTransaction) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpGetTransaction) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, 
ok := in.Parameters.(*GetTransactionInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/get-transaction") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "POST" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + restEncoder.SetHeader("Content-Type").String("application/json") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsRestjson1_serializeOpDocumentGetTransactionInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsGetTransactionInput(v *GetTransactionInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + return nil +} + +func awsRestjson1_serializeOpDocumentGetTransactionInput(v *GetTransactionInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if len(v.Network) > 0 { + ok := object.Key("network") + ok.String(string(v.Network)) + } + + if v.TransactionHash != nil { + ok := object.Key("transactionHash") + ok.String(*v.TransactionHash) + } + + return nil +} + +type awsRestjson1_serializeOpListTokenBalances struct { +} + +func (*awsRestjson1_serializeOpListTokenBalances) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpListTokenBalances) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListTokenBalancesInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/list-token-balances") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "POST" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + restEncoder.SetHeader("Content-Type").String("application/json") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsRestjson1_serializeOpDocumentListTokenBalancesInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = 
restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsListTokenBalancesInput(v *ListTokenBalancesInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + return nil +} + +func awsRestjson1_serializeOpDocumentListTokenBalancesInput(v *ListTokenBalancesInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.MaxResults != nil { + ok := object.Key("maxResults") + ok.Integer(*v.MaxResults) + } + + if v.NextToken != nil { + ok := object.Key("nextToken") + ok.String(*v.NextToken) + } + + if v.OwnerFilter != nil { + ok := object.Key("ownerFilter") + if err := awsRestjson1_serializeDocumentOwnerFilter(v.OwnerFilter, ok); err != nil { + return err + } + } + + if v.TokenFilter != nil { + ok := object.Key("tokenFilter") + if err := awsRestjson1_serializeDocumentTokenFilter(v.TokenFilter, ok); err != nil { + return err + } + } + + return nil +} + +type awsRestjson1_serializeOpListTransactionEvents struct { +} + +func (*awsRestjson1_serializeOpListTransactionEvents) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpListTransactionEvents) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListTransactionEventsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/list-transaction-events") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "POST" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + restEncoder.SetHeader("Content-Type").String("application/json") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsRestjson1_serializeOpDocumentListTransactionEventsInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsListTransactionEventsInput(v *ListTransactionEventsInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + return nil +} + +func awsRestjson1_serializeOpDocumentListTransactionEventsInput(v *ListTransactionEventsInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.MaxResults != nil { + ok := object.Key("maxResults") + ok.Integer(*v.MaxResults) + } + + if len(v.Network) > 0 { + ok 
:= object.Key("network") + ok.String(string(v.Network)) + } + + if v.NextToken != nil { + ok := object.Key("nextToken") + ok.String(*v.NextToken) + } + + if v.TransactionHash != nil { + ok := object.Key("transactionHash") + ok.String(*v.TransactionHash) + } + + return nil +} + +type awsRestjson1_serializeOpListTransactions struct { +} + +func (*awsRestjson1_serializeOpListTransactions) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpListTransactions) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListTransactionsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/list-transactions") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "POST" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + restEncoder.SetHeader("Content-Type").String("application/json") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsRestjson1_serializeOpDocumentListTransactionsInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsListTransactionsInput(v *ListTransactionsInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + return nil +} + +func awsRestjson1_serializeOpDocumentListTransactionsInput(v *ListTransactionsInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Address != nil { + ok := object.Key("address") + ok.String(*v.Address) + } + + if v.FromBlockchainInstant != nil { + ok := object.Key("fromBlockchainInstant") + if err := awsRestjson1_serializeDocumentBlockchainInstant(v.FromBlockchainInstant, ok); err != nil { + return err + } + } + + if v.MaxResults != nil { + ok := object.Key("maxResults") + ok.Integer(*v.MaxResults) + } + + if len(v.Network) > 0 { + ok := object.Key("network") + ok.String(string(v.Network)) + } + + if v.NextToken != nil { + ok := object.Key("nextToken") + ok.String(*v.NextToken) + } + + if v.Sort != nil { + ok := object.Key("sort") + if err := awsRestjson1_serializeDocumentListTransactionsSort(v.Sort, ok); err != nil { + return err + } + } + + if v.ToBlockchainInstant != nil { + ok := object.Key("toBlockchainInstant") + if err := awsRestjson1_serializeDocumentBlockchainInstant(v.ToBlockchainInstant, ok); err != nil { + return err + } + } + + return nil +} + +func awsRestjson1_serializeDocumentBatchGetTokenBalanceInputItem(v 
*types.BatchGetTokenBalanceInputItem, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.AtBlockchainInstant != nil { + ok := object.Key("atBlockchainInstant") + if err := awsRestjson1_serializeDocumentBlockchainInstant(v.AtBlockchainInstant, ok); err != nil { + return err + } + } + + if v.OwnerIdentifier != nil { + ok := object.Key("ownerIdentifier") + if err := awsRestjson1_serializeDocumentOwnerIdentifier(v.OwnerIdentifier, ok); err != nil { + return err + } + } + + if v.TokenIdentifier != nil { + ok := object.Key("tokenIdentifier") + if err := awsRestjson1_serializeDocumentTokenIdentifier(v.TokenIdentifier, ok); err != nil { + return err + } + } + + return nil +} + +func awsRestjson1_serializeDocumentBlockchainInstant(v *types.BlockchainInstant, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Time != nil { + ok := object.Key("time") + ok.Double(smithytime.FormatEpochSeconds(*v.Time)) + } + + return nil +} + +func awsRestjson1_serializeDocumentGetTokenBalanceInputList(v []types.BatchGetTokenBalanceInputItem, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsRestjson1_serializeDocumentBatchGetTokenBalanceInputItem(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsRestjson1_serializeDocumentListTransactionsSort(v *types.ListTransactionsSort, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if len(v.SortBy) > 0 { + ok := object.Key("sortBy") + ok.String(string(v.SortBy)) + } + + if len(v.SortOrder) > 0 { + ok := object.Key("sortOrder") + ok.String(string(v.SortOrder)) + } + + return nil +} + +func awsRestjson1_serializeDocumentOwnerFilter(v *types.OwnerFilter, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Address != nil { + ok := object.Key("address") + ok.String(*v.Address) + } + + return nil +} + +func awsRestjson1_serializeDocumentOwnerIdentifier(v *types.OwnerIdentifier, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Address != nil { + ok := object.Key("address") + ok.String(*v.Address) + } + + return nil +} + +func awsRestjson1_serializeDocumentTokenFilter(v *types.TokenFilter, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ContractAddress != nil { + ok := object.Key("contractAddress") + ok.String(*v.ContractAddress) + } + + if len(v.Network) > 0 { + ok := object.Key("network") + ok.String(string(v.Network)) + } + + if v.TokenId != nil { + ok := object.Key("tokenId") + ok.String(*v.TokenId) + } + + return nil +} + +func awsRestjson1_serializeDocumentTokenIdentifier(v *types.TokenIdentifier, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ContractAddress != nil { + ok := object.Key("contractAddress") + ok.String(*v.ContractAddress) + } + + if len(v.Network) > 0 { + ok := object.Key("network") + ok.String(string(v.Network)) + } + + if v.TokenId != nil { + ok := object.Key("tokenId") + ok.String(*v.TokenId) + } + + return nil +} diff --git a/service/managedblockchainquery/types/enums.go b/service/managedblockchainquery/types/enums.go new file mode 100644 index 00000000000..a9b58f0adcc --- /dev/null +++ b/service/managedblockchainquery/types/enums.go @@ -0,0 +1,185 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
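The serializers above all follow the same REST-JSON pattern: each operation POSTs a JSON document to a fixed path. As a rough illustration of the wire shape (the addresses are placeholders, and it is assumed that GetTokenBalanceOutput carries a Balance field):

    // Approximate request produced by the GetTokenBalance serializer above:
    //
    //   POST /get-token-balance
    //   Content-Type: application/json
    //
    //   {"ownerIdentifier":{"address":"0xOwner"},
    //    "tokenIdentifier":{"network":"ETHEREUM_MAINNET","contractAddress":"0xToken"}}
    balance, err := client.GetTokenBalance(context.TODO(), &managedblockchainquery.GetTokenBalanceInput{
        OwnerIdentifier: &types.OwnerIdentifier{Address: aws.String("0xOwner")}, // placeholder
        TokenIdentifier: &types.TokenIdentifier{
            Network:         types.QueryNetworkEthereumMainnet,
            ContractAddress: aws.String("0xToken"), // placeholder
        },
    })
    if err != nil {
        log.Fatal(err)
    }
    log.Println(aws.ToString(balance.Balance))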
+ +package types + +type ErrorType string + +// Enum values for ErrorType +const ( + // An API request validation exception + ErrorTypeValidationException ErrorType = "VALIDATION_EXCEPTION" + // An API request retrieving an item that can't be found + ErrorTypeResourceNotFoundException ErrorType = "RESOURCE_NOT_FOUND_EXCEPTION" +) + +// Values returns all known values for ErrorType. Note that this can be expanded +// in the future, and so it is only as up to date as the client. The ordering of +// this slice is not guaranteed to be stable across updates. +func (ErrorType) Values() []ErrorType { + return []ErrorType{ + "VALIDATION_EXCEPTION", + "RESOURCE_NOT_FOUND_EXCEPTION", + } +} + +type ListTransactionsSortBy string + +// Enum values for ListTransactionsSortBy +const ( + // Timestamp of a transaction + ListTransactionsSortByTransactionTimestamp ListTransactionsSortBy = "TRANSACTION_TIMESTAMP" +) + +// Values returns all known values for ListTransactionsSortBy. Note that this can +// be expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (ListTransactionsSortBy) Values() []ListTransactionsSortBy { + return []ListTransactionsSortBy{ + "TRANSACTION_TIMESTAMP", + } +} + +type QueryNetwork string + +// Enum values for QueryNetwork +const ( + // Ethereum main network + QueryNetworkEthereumMainnet QueryNetwork = "ETHEREUM_MAINNET" + // Bitcoin main network + QueryNetworkBitcoinMainnet QueryNetwork = "BITCOIN_MAINNET" +) + +// Values returns all known values for QueryNetwork. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (QueryNetwork) Values() []QueryNetwork { + return []QueryNetwork{ + "ETHEREUM_MAINNET", + "BITCOIN_MAINNET", + } +} + +type QueryTransactionEventType string + +// Enum values for QueryTransactionEventType +const ( + // An ERC20 transfer type + QueryTransactionEventTypeErc20Transfer QueryTransactionEventType = "ERC20_TRANSFER" + // An ERC20_MINT transfer type + QueryTransactionEventTypeErc20Mint QueryTransactionEventType = "ERC20_MINT" + // An ERC20_BURN transfer type + QueryTransactionEventTypeErc20Burn QueryTransactionEventType = "ERC20_BURN" + // An ERC20_DEPOSIT transfer type + QueryTransactionEventTypeErc20Deposit QueryTransactionEventType = "ERC20_DEPOSIT" + // An ERC20_WITHDRAWAL transfer type + QueryTransactionEventTypeErc20Withdrawal QueryTransactionEventType = "ERC20_WITHDRAWAL" + // An ERC721 transfer type + QueryTransactionEventTypeErc721Transfer QueryTransactionEventType = "ERC721_TRANSFER" + // An ERC1155 transfer type + QueryTransactionEventTypeErc1155Transfer QueryTransactionEventType = "ERC1155_TRANSFER" + // A Bitcoin Vin transfer type + QueryTransactionEventTypeBitcoinVin QueryTransactionEventType = "BITCOIN_VIN" + // A Bitcoin Vout transfer type + QueryTransactionEventTypeBitcoinVout QueryTransactionEventType = "BITCOIN_VOUT" + // An internal ETH transfer type + QueryTransactionEventTypeInternalEthTransfer QueryTransactionEventType = "INTERNAL_ETH_TRANSFER" + // An ETH transfer type + QueryTransactionEventTypeEthTransfer QueryTransactionEventType = "ETH_TRANSFER" +) + +// Values returns all known values for QueryTransactionEventType. Note that this +// can be expanded in the future, and so it is only as up to date as the client. +// The ordering of this slice is not guaranteed to be stable across updates. 
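Every Values() helper above warns that the enum set can grow ahead of this client, so callers should keep a default path for values they do not recognize. A sketch, assuming ev is a types.TransactionEvent returned by ListTransactionEvents:

    switch ev.EventType {
    case types.QueryTransactionEventTypeErc20Transfer,
        types.QueryTransactionEventTypeEthTransfer:
        // handle the event types this application understands
    default:
        // a newer service revision may emit values this client predates; skip rather than fail
        log.Printf("ignoring unrecognized event type %q", ev.EventType)
    }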
+func (QueryTransactionEventType) Values() []QueryTransactionEventType { + return []QueryTransactionEventType{ + "ERC20_TRANSFER", + "ERC20_MINT", + "ERC20_BURN", + "ERC20_DEPOSIT", + "ERC20_WITHDRAWAL", + "ERC721_TRANSFER", + "ERC1155_TRANSFER", + "BITCOIN_VIN", + "BITCOIN_VOUT", + "INTERNAL_ETH_TRANSFER", + "ETH_TRANSFER", + } +} + +type QueryTransactionStatus string + +// Enum values for QueryTransactionStatus +const ( + // The transaction has been confirmed and is final in the blockchain + QueryTransactionStatusFinal QueryTransactionStatus = "FINAL" + // The transaction completed on the blockchain, but failed + QueryTransactionStatusFailed QueryTransactionStatus = "FAILED" +) + +// Values returns all known values for QueryTransactionStatus. Note that this can +// be expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (QueryTransactionStatus) Values() []QueryTransactionStatus { + return []QueryTransactionStatus{ + "FINAL", + "FAILED", + } +} + +type ResourceType string + +// Enum values for ResourceType +const ( + ResourceTypeCollection ResourceType = "collection" +) + +// Values returns all known values for ResourceType. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (ResourceType) Values() []ResourceType { + return []ResourceType{ + "collection", + } +} + +type SortOrder string + +// Enum values for SortOrder +const ( + // Result sorted in ascending order + SortOrderAscending SortOrder = "ASCENDING" + // Result sorted in descending order + SortOrderDescending SortOrder = "DESCENDING" +) + +// Values returns all known values for SortOrder. Note that this can be expanded +// in the future, and so it is only as up to date as the client. The ordering of +// this slice is not guaranteed to be stable across updates. +func (SortOrder) Values() []SortOrder { + return []SortOrder{ + "ASCENDING", + "DESCENDING", + } +} + +type ValidationExceptionReason string + +// Enum values for ValidationExceptionReason +const ( + ValidationExceptionReasonUnknownOperation ValidationExceptionReason = "unknownOperation" + ValidationExceptionReasonCannotParse ValidationExceptionReason = "cannotParse" + ValidationExceptionReasonFieldValidationFailed ValidationExceptionReason = "fieldValidationFailed" + ValidationExceptionReasonOther ValidationExceptionReason = "other" +) + +// Values returns all known values for ValidationExceptionReason. Note that this +// can be expanded in the future, and so it is only as up to date as the client. +// The ordering of this slice is not guaranteed to be stable across updates. +func (ValidationExceptionReason) Values() []ValidationExceptionReason { + return []ValidationExceptionReason{ + "unknownOperation", + "cannotParse", + "fieldValidationFailed", + "other", + } +} diff --git a/service/managedblockchainquery/types/errors.go b/service/managedblockchainquery/types/errors.go new file mode 100644 index 00000000000..2e056e82655 --- /dev/null +++ b/service/managedblockchainquery/types/errors.go @@ -0,0 +1,184 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package types + +import ( + "fmt" + smithy "github.com/aws/smithy-go" +) + +// The Amazon Web Services account doesn’t have access to this resource. 
+type AccessDeniedException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *AccessDeniedException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *AccessDeniedException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *AccessDeniedException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "AccessDeniedException" + } + return *e.ErrorCodeOverride +} +func (e *AccessDeniedException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request processing has failed because of an internal error in the service. +type InternalServerException struct { + Message *string + + ErrorCodeOverride *string + + RetryAfterSeconds *int32 + + noSmithyDocumentSerde +} + +func (e *InternalServerException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InternalServerException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InternalServerException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "InternalServerException" + } + return *e.ErrorCodeOverride +} +func (e *InternalServerException) ErrorFault() smithy.ErrorFault { return smithy.FaultServer } + +// The resource was not found. +type ResourceNotFoundException struct { + Message *string + + ErrorCodeOverride *string + + ResourceId *string + ResourceType ResourceType + + noSmithyDocumentSerde +} + +func (e *ResourceNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ResourceNotFoundException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *ResourceNotFoundException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "ResourceNotFoundException" + } + return *e.ErrorCodeOverride +} +func (e *ResourceNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The service quota has been exceeded for this resource. +type ServiceQuotaExceededException struct { + Message *string + + ErrorCodeOverride *string + + ResourceId *string + ResourceType ResourceType + ServiceCode *string + QuotaCode *string + + noSmithyDocumentSerde +} + +func (e *ServiceQuotaExceededException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ServiceQuotaExceededException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *ServiceQuotaExceededException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "ServiceQuotaExceededException" + } + return *e.ErrorCodeOverride +} +func (e *ServiceQuotaExceededException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request or operation couldn't be performed because a service is throttling +// requests. The most common source of throttling errors is when you create +// resources that exceed your service limit for this resource type. Request a limit +// increase or delete unused resources, if possible. 
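Callers match these modeled errors with errors.As rather than by string comparison; the ThrottlingException defined next also carries a RetryAfterSeconds hint that can be honored before retrying. A sketch, with err from any operation call in the earlier examples (errors and time imports assumed):

    var rnf *types.ResourceNotFoundException
    var thr *types.ThrottlingException
    switch {
    case errors.As(err, &rnf):
        log.Printf("no such resource (type %v)", rnf.ResourceType)
    case errors.As(err, &thr) && thr.RetryAfterSeconds != nil:
        // crude backoff honoring the service hint before a retry
        time.Sleep(time.Duration(*thr.RetryAfterSeconds) * time.Second)
    default:
        log.Fatal(err)
    }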
+type ThrottlingException struct { + Message *string + + ErrorCodeOverride *string + + ServiceCode *string + QuotaCode *string + RetryAfterSeconds *int32 + + noSmithyDocumentSerde +} + +func (e *ThrottlingException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ThrottlingException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *ThrottlingException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "ThrottlingException" + } + return *e.ErrorCodeOverride +} +func (e *ThrottlingException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The resource passed is invalid. +type ValidationException struct { + Message *string + + ErrorCodeOverride *string + + Reason ValidationExceptionReason + FieldList []ValidationExceptionField + + noSmithyDocumentSerde +} + +func (e *ValidationException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ValidationException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *ValidationException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "ValidationException" + } + return *e.ErrorCodeOverride +} +func (e *ValidationException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } diff --git a/service/managedblockchainquery/types/types.go b/service/managedblockchainquery/types/types.go new file mode 100644 index 00000000000..83a71b381d9 --- /dev/null +++ b/service/managedblockchainquery/types/types.go @@ -0,0 +1,376 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package types + +import ( + smithydocument "github.com/aws/smithy-go/document" + "time" +) + +// Error generated from a failed BatchGetTokenBalance request. +type BatchGetTokenBalanceErrorItem struct { + + // The error code associated with the error. + // + // This member is required. + ErrorCode *string + + // The message associated with the error. + // + // This member is required. + ErrorMessage *string + + // The type of error. + // + // This member is required. + ErrorType ErrorType + + // The container for time. + AtBlockchainInstant *BlockchainInstant + + // The container for the identifier of the owner. + OwnerIdentifier *OwnerIdentifier + + // The container for the identifier for the token including the unique token ID + // and its blockchain network. Only the native tokens BTC, ETH, and the ERC-20, + // ERC-721, and ERC-1155 token standards are supported. + TokenIdentifier *TokenIdentifier + + noSmithyDocumentSerde +} + +// The container for the input for getting a token balance. +type BatchGetTokenBalanceInputItem struct { + + // The container for the identifier of the owner. + // + // This member is required. + OwnerIdentifier *OwnerIdentifier + + // The container for the identifier for the token including the unique token ID + // and its blockchain network. Only the native tokens BTC, ETH, and the ERC-20, + // ERC-721, and ERC-1155 token standards are supported. + // + // This member is required. + TokenIdentifier *TokenIdentifier + + // The container for time. + AtBlockchainInstant *BlockchainInstant + + noSmithyDocumentSerde +} + +// The container for the properties of a token balance output. +type BatchGetTokenBalanceOutputItem struct { + + // The container for time. + // + // This member is required. + AtBlockchainInstant *BlockchainInstant + + // The container for the token balance.
+ // + // This member is required. + Balance *string + + // The container for time. + LastUpdatedTime *BlockchainInstant + + // The container for the identifier of the owner. + OwnerIdentifier *OwnerIdentifier + + // The container for the identifier for the token including the unique token ID + // and its blockchain network. Only the native tokens BTC, ETH, and the ERC-20, + // ERC-721, and ERC-1155 token standards are supported. + TokenIdentifier *TokenIdentifier + + noSmithyDocumentSerde +} + +// The container for time. +type BlockchainInstant struct { + + // The container for the timestamp of the blockchain instant. This timestamp will + // only be recorded up to the second. + Time *time.Time + + noSmithyDocumentSerde +} + +// The container for determining how the list transaction result will be sorted. +type ListTransactionsSort struct { + + // Defaults to the value TRANSACTION_TIMESTAMP. + SortBy ListTransactionsSortBy + + // The container for the sort order for ListTransactions. The SortOrder field + // only accepts the values ASCENDING and DESCENDING. Not providing SortOrder will + // default to ASCENDING. + SortOrder SortOrder + + noSmithyDocumentSerde +} + +// The container for the owner information to filter by. +type OwnerFilter struct { + + // The contract or wallet address. + // + // This member is required. + Address *string + + noSmithyDocumentSerde +} + +// The container for the identifier of the owner. +type OwnerIdentifier struct { + + // The contract or wallet address for the owner. + // + // This member is required. + Address *string + + noSmithyDocumentSerde +} + +// The balance of the token. +type TokenBalance struct { + + // The time for when the TokenBalance is requested or the current time if a time + // is not provided in the request. This time will only be recorded up to the + // second. + // + // This member is required. + AtBlockchainInstant *BlockchainInstant + + // The container of the token balance. + // + // This member is required. + Balance *string + + // The timestamp of the last transaction at which the balance for the token in the + // wallet was updated. + LastUpdatedTime *BlockchainInstant + + // The container for the identifier of the owner. + OwnerIdentifier *OwnerIdentifier + + // The identifier for the token, including the unique token ID and its blockchain + // network. + TokenIdentifier *TokenIdentifier + + noSmithyDocumentSerde +} + +// The container of the token filter, such as the contract address on a given +// blockchain network or a unique token identifier on a given blockchain network. +// You must always specify the network property of this container when using this +// operation. +type TokenFilter struct { + + // The blockchain network of the token. + // + // This member is required. + Network QueryNetwork + + // This is the address of the contract. + ContractAddress *string + + // The unique identifier of the token. + TokenId *string + + noSmithyDocumentSerde +} + +// The container for the identifier for the token including the unique token ID +// and its blockchain network. Only the native tokens BTC, ETH, and the ERC-20, +// ERC-721, and ERC-1155 token standards are supported. +type TokenIdentifier struct { + + // The blockchain network of the token. + // + // This member is required. + Network QueryNetwork + + // This is the token's contract address. + ContractAddress *string + + // The unique identifier of the token.
+ TokenId *string + + noSmithyDocumentSerde +} + +// There are two possible types of transactions used for this data type: +// - A Bitcoin transaction is a movement of BTC from one address to another. +// - An Ethereum transaction refers to an action initiated by an externally +// owned account, which is an account managed by a human, not a contract. For +// example, if Bob sends Alice 1 ETH, Bob's account must be debited and Alice's +// must be credited. This state-changing action occurs within a transaction. +type Transaction struct { + + // The blockchain network where the transaction occurred. + // + // This member is required. + Network QueryNetwork + + // The number of transactions in the block. + // + // This member is required. + NumberOfTransactions *int64 + + // The status of the transaction. + // + // This member is required. + Status QueryTransactionStatus + + // The address receiving the transaction. + // + // This member is required. + To *string + + // The hash of the transaction. It is generated whenever a transaction is verified + // and added to the blockchain. + // + // This member is required. + TransactionHash *string + + // The index of the transaction within a blockchain. + // + // This member is required. + TransactionIndex *int64 + + // The timestamp of the transaction. + // + // This member is required. + TransactionTimestamp *time.Time + + // The block hash is a unique identifier for a block. It is a fixed-size string + // that is calculated by using the information in the block. The block hash is used + // to verify the integrity of the data in the block. + BlockHash *string + + // The block number in which the transaction is recorded. + BlockNumber *string + + // The blockchain address for the contract. + ContractAddress *string + + // The amount of gas used up to the specified point in the block. + CumulativeGasUsed *string + + // The effective gas price. + EffectiveGasPrice *string + + // The initiator of the transaction. It is either in the form of a public key or + // a contract address. + From *string + + // The amount of gas used for the transaction. + GasUsed *string + + // The signature of the transaction. The X coordinate of a point R. + SignatureR *string + + // The signature of the transaction. The Y coordinate of a point S. + SignatureS *string + + // The signature of the transaction. The Z coordinate of a point V. + SignatureV *int32 + + // The transaction fee. + TransactionFee *string + + // The unique identifier of the transaction. It is generated whenever a + // transaction is verified and added to the blockchain. + TransactionId *string + + noSmithyDocumentSerde +} + +// The container for the properties of a transaction event. +type TransactionEvent struct { + + // The type of transaction event. + // + // This member is required. + EventType QueryTransactionEventType + + // The blockchain network where the transaction occurred. + // + // This member is required. + Network QueryNetwork + + // The hash of the transaction. It is generated whenever a transaction is verified + // and added to the blockchain. + // + // This member is required. + TransactionHash *string + + // The blockchain address for the contract. + ContractAddress *string + + // The wallet address initiating the transaction. It can either be a public key or + // a contract. + From *string + + // The wallet address receiving the transaction. It can either be a public key or + // a contract.
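The list inputs above pair MaxResults with an opaque NextToken, and this diff ships no paginator helpers, so paging is a manual loop. A sketch, assuming ListTransactionsOutput exposes Transactions and NextToken fields (the wallet address is a placeholder):

    in := &managedblockchainquery.ListTransactionsInput{
        Address:    aws.String("0xWallet"), // placeholder
        Network:    types.QueryNetworkEthereumMainnet,
        MaxResults: aws.Int32(100),
        Sort: &types.ListTransactionsSort{
            SortBy:    types.ListTransactionsSortByTransactionTimestamp,
            SortOrder: types.SortOrderDescending,
        },
    }
    for {
        page, err := client.ListTransactions(context.TODO(), in)
        if err != nil {
            log.Fatal(err)
        }
        for _, tx := range page.Transactions {
            log.Println(aws.ToString(tx.TransactionHash), tx.TransactionTimestamp)
        }
        if page.NextToken == nil {
            break
        }
        in.NextToken = page.NextToken
    }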
+ To *string + + // The unique identifier for the token involved in the transaction. + TokenId *string + + // The unique identifier of the transaction. It is generated whenever a + // transaction is verified and added to the blockchain. + TransactionId *string + + // The value that was transacted. + Value *string + + // The position of the vout in the transaction output list. + VoutIndex *int32 + + noSmithyDocumentSerde +} + +// The container of the transaction output. +type TransactionOutputItem struct { + + // The blockchain network where the transaction occurred. + // + // This member is required. + Network QueryNetwork + + // The hash of the transaction. It is generated whenever a transaction is verified + // and added to the blockchain. + // + // This member is required. + TransactionHash *string + + // The time when the transaction occurred. + // + // This member is required. + TransactionTimestamp *time.Time + + noSmithyDocumentSerde +} + +// The resource passed is invalid. +type ValidationExceptionField struct { + + // The ValidationException message. + // + // This member is required. + Message *string + + // The name of the field that triggered the ValidationException . + // + // This member is required. + Name *string + + noSmithyDocumentSerde +} + +type noSmithyDocumentSerde = smithydocument.NoSerde diff --git a/service/managedblockchainquery/validators.go b/service/managedblockchainquery/validators.go new file mode 100644 index 00000000000..046036bbc0b --- /dev/null +++ b/service/managedblockchainquery/validators.go @@ -0,0 +1,379 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package managedblockchainquery + +import ( + "context" + "fmt" + "github.com/aws/aws-sdk-go-v2/service/managedblockchainquery/types" + smithy "github.com/aws/smithy-go" + "github.com/aws/smithy-go/middleware" +) + +type validateOpBatchGetTokenBalance struct { +} + +func (*validateOpBatchGetTokenBalance) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpBatchGetTokenBalance) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*BatchGetTokenBalanceInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpBatchGetTokenBalanceInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetTokenBalance struct { +} + +func (*validateOpGetTokenBalance) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetTokenBalance) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetTokenBalanceInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetTokenBalanceInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetTransaction struct { +} + +func (*validateOpGetTransaction) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetTransaction) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := 
in.Parameters.(*GetTransactionInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetTransactionInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpListTokenBalances struct { +} + +func (*validateOpListTokenBalances) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpListTokenBalances) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ListTokenBalancesInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpListTokenBalancesInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpListTransactionEvents struct { +} + +func (*validateOpListTransactionEvents) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpListTransactionEvents) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ListTransactionEventsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpListTransactionEventsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpListTransactions struct { +} + +func (*validateOpListTransactions) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpListTransactions) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ListTransactionsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpListTransactionsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +func addOpBatchGetTokenBalanceValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpBatchGetTokenBalance{}, middleware.After) +} + +func addOpGetTokenBalanceValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetTokenBalance{}, middleware.After) +} + +func addOpGetTransactionValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetTransaction{}, middleware.After) +} + +func addOpListTokenBalancesValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpListTokenBalances{}, middleware.After) +} + +func addOpListTransactionEventsValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpListTransactionEvents{}, middleware.After) +} + +func addOpListTransactionsValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpListTransactions{}, middleware.After) +} + +func validateBatchGetTokenBalanceInputItem(v *types.BatchGetTokenBalanceInputItem) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "BatchGetTokenBalanceInputItem"} + if v.TokenIdentifier == nil { + 
invalidParams.Add(smithy.NewErrParamRequired("TokenIdentifier")) + } else if v.TokenIdentifier != nil { + if err := validateTokenIdentifier(v.TokenIdentifier); err != nil { + invalidParams.AddNested("TokenIdentifier", err.(smithy.InvalidParamsError)) + } + } + if v.OwnerIdentifier == nil { + invalidParams.Add(smithy.NewErrParamRequired("OwnerIdentifier")) + } else if v.OwnerIdentifier != nil { + if err := validateOwnerIdentifier(v.OwnerIdentifier); err != nil { + invalidParams.AddNested("OwnerIdentifier", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateGetTokenBalanceInputList(v []types.BatchGetTokenBalanceInputItem) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetTokenBalanceInputList"} + for i := range v { + if err := validateBatchGetTokenBalanceInputItem(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOwnerFilter(v *types.OwnerFilter) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "OwnerFilter"} + if v.Address == nil { + invalidParams.Add(smithy.NewErrParamRequired("Address")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOwnerIdentifier(v *types.OwnerIdentifier) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "OwnerIdentifier"} + if v.Address == nil { + invalidParams.Add(smithy.NewErrParamRequired("Address")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateTokenFilter(v *types.TokenFilter) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "TokenFilter"} + if len(v.Network) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("Network")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateTokenIdentifier(v *types.TokenIdentifier) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "TokenIdentifier"} + if len(v.Network) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("Network")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpBatchGetTokenBalanceInput(v *BatchGetTokenBalanceInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "BatchGetTokenBalanceInput"} + if v.GetTokenBalanceInputs != nil { + if err := validateGetTokenBalanceInputList(v.GetTokenBalanceInputs); err != nil { + invalidParams.AddNested("GetTokenBalanceInputs", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetTokenBalanceInput(v *GetTokenBalanceInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetTokenBalanceInput"} + if v.TokenIdentifier == nil { + invalidParams.Add(smithy.NewErrParamRequired("TokenIdentifier")) + } else if v.TokenIdentifier != nil { + if err := validateTokenIdentifier(v.TokenIdentifier); err != nil { + invalidParams.AddNested("TokenIdentifier", err.(smithy.InvalidParamsError)) + } + } + if v.OwnerIdentifier == nil { + invalidParams.Add(smithy.NewErrParamRequired("OwnerIdentifier")) + } else if 
v.OwnerIdentifier != nil { + if err := validateOwnerIdentifier(v.OwnerIdentifier); err != nil { + invalidParams.AddNested("OwnerIdentifier", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetTransactionInput(v *GetTransactionInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetTransactionInput"} + if v.TransactionHash == nil { + invalidParams.Add(smithy.NewErrParamRequired("TransactionHash")) + } + if len(v.Network) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("Network")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpListTokenBalancesInput(v *ListTokenBalancesInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ListTokenBalancesInput"} + if v.OwnerFilter != nil { + if err := validateOwnerFilter(v.OwnerFilter); err != nil { + invalidParams.AddNested("OwnerFilter", err.(smithy.InvalidParamsError)) + } + } + if v.TokenFilter == nil { + invalidParams.Add(smithy.NewErrParamRequired("TokenFilter")) + } else if v.TokenFilter != nil { + if err := validateTokenFilter(v.TokenFilter); err != nil { + invalidParams.AddNested("TokenFilter", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpListTransactionEventsInput(v *ListTransactionEventsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ListTransactionEventsInput"} + if v.TransactionHash == nil { + invalidParams.Add(smithy.NewErrParamRequired("TransactionHash")) + } + if len(v.Network) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("Network")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpListTransactionsInput(v *ListTransactionsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ListTransactionsInput"} + if v.Address == nil { + invalidParams.Add(smithy.NewErrParamRequired("Address")) + } + if len(v.Network) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("Network")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} diff --git a/service/mediaconvert/types/types.go b/service/mediaconvert/types/types.go index 9d5a2c7b058..627bf0d7786 100644 --- a/service/mediaconvert/types/types.go +++ b/service/mediaconvert/types/types.go @@ -7,13 +7,11 @@ import ( "time" ) -// Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to the -// value AAC. The service accepts one of two mutually exclusive groups of AAC -// settings--VBR and CBR. To select one of these modes, set the value of Bitrate -// control mode (rateControlMode) to "VBR" or "CBR". In VBR mode, you control the -// audio quality with the setting VBR quality (vbrQuality). In CBR mode, you use -// the setting Bitrate (bitrate). Defaults and valid values depend on the rate -// control mode. +// Required when you set Codec to the value AAC. The service accepts one of two +// mutually exclusive groups of AAC settings--VBR and CBR. To select one of these +// modes, set the value of Bitrate control mode to "VBR" or "CBR". In VBR mode, you +// control the audio quality with the setting VBR quality. In CBR mode, you use the +// setting Bitrate. Defaults and valid values depend on the rate control mode. 
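//
// A minimal sketch of the two modes, from a caller's perspective (values are
// illustrative; variable names are the editor's, and the types and constants
// are this package's generated AacSettings, AacRateControlMode, and
// AacVbrQuality):
//
//	cbr := &types.AacSettings{
//		RateControlMode: types.AacRateControlModeCbr,
//		Bitrate:         96000, // CBR: control quality via Bitrate, in bits per second
//	}
//	vbr := &types.AacSettings{
//		RateControlMode: types.AacRateControlModeVbr,
//		VbrQuality:      types.AacVbrQualityMediumHigh, // VBR: control quality via VbrQuality
//	}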
type AacSettings struct { // Choose BROADCASTER_MIXED_AD when the input contains pre-mixed main audio + @@ -32,8 +30,8 @@ type AacSettings struct { // 32000, 40000, 48000, 56000, 64000, 80000, 96000, 112000, 128000, 160000, 192000, // 224000, 256000, 288000, 320000, 384000, 448000, 512000, 576000, 640000, 768000, // 896000, 1024000. The value you set is also constrained by the values that you - // choose for Profile (codecProfile), Bitrate control mode (codingMode), and Sample - // rate (sampleRate). Default values depend on Bitrate control mode and Profile. + // choose for Profile, Bitrate control mode, and Sample rate. Default values depend + // on Bitrate control mode and Profile. Bitrate int32 // AAC Profile. @@ -76,8 +74,7 @@ type AacSettings struct { noSmithyDocumentSerde } -// Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to the -// value AC3. +// Required when you set Codec to the value AC3. type Ac3Settings struct { // Specify the average bitrate in bits per second. The bitrate that you specify @@ -104,33 +101,29 @@ type Ac3Settings struct { // Choose the Dolby Digital dynamic range control (DRC) profile that MediaConvert // uses when encoding the metadata in the Dolby Digital stream for the line // operating mode. Related setting: When you use this setting, MediaConvert ignores - // any value you provide for Dynamic range compression profile - // (DynamicRangeCompressionProfile). For information about the Dolby Digital DRC - // operating modes and profiles, see the Dynamic Range Control chapter of the Dolby - // Metadata Guide at + // any value you provide for Dynamic range compression profile. For information + // about the Dolby Digital DRC operating modes and profiles, see the Dynamic Range + // Control chapter of the Dolby Metadata Guide at // https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf. DynamicRangeCompressionLine Ac3DynamicRangeCompressionLine // When you want to add Dolby dynamic range compression (DRC) signaling to your // output stream, we recommend that you use the mode-specific settings instead of - // Dynamic range compression profile (DynamicRangeCompressionProfile). The - // mode-specific settings are Dynamic range compression profile, line mode - // (dynamicRangeCompressionLine) and Dynamic range compression profile, RF mode - // (dynamicRangeCompressionRf). Note that when you specify values for all three - // settings, MediaConvert ignores the value of this setting in favor of the - // mode-specific settings. If you do use this setting instead of the mode-specific - // settings, choose None (NONE) to leave out DRC signaling. Keep the default Film - // standard (FILM_STANDARD) to set the profile to Dolby's film standard profile for - // all operating modes. + // Dynamic range compression profile. The mode-specific settings are Dynamic range + // compression profile, line mode and Dynamic range compression profile, RF mode. + // Note that when you specify values for all three settings, MediaConvert ignores + // the value of this setting in favor of the mode-specific settings. If you do use + // this setting instead of the mode-specific settings, choose None to leave out DRC + // signaling. Keep the default Film standard to set the profile to Dolby's film + // standard profile for all operating modes. 
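//
// A minimal sketch of the recommended mode-specific configuration (variable
// name is the editor's; the constant names assume this package's generated
// enum values for FILM_STANDARD):
//
//	ac3 := &types.Ac3Settings{
//		DynamicRangeCompressionLine: types.Ac3DynamicRangeCompressionLineFilmStandard,
//		DynamicRangeCompressionRf:   types.Ac3DynamicRangeCompressionRfFilmStandard,
//	}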
DynamicRangeCompressionProfile Ac3DynamicRangeCompressionProfile // Choose the Dolby Digital dynamic range control (DRC) profile that MediaConvert // uses when encoding the metadata in the Dolby Digital stream for the RF operating // mode. Related setting: When you use this setting, MediaConvert ignores any value - // you provide for Dynamic range compression profile - // (DynamicRangeCompressionProfile). For information about the Dolby Digital DRC - // operating modes and profiles, see the Dynamic Range Control chapter of the Dolby - // Metadata Guide at + // you provide for Dynamic range compression profile. For information about the + // Dolby Digital DRC operating modes and profiles, see the Dynamic Range Control + // chapter of the Dolby Metadata Guide at // https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf. DynamicRangeCompressionRf Ac3DynamicRangeCompressionRf @@ -183,12 +176,11 @@ type AdvancedInputFilterSettings struct { noSmithyDocumentSerde } -// Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to the -// value AIFF. +// Required when you set Codec to the value AIFF. type AiffSettings struct { - // Specify Bit depth (BitDepth), in bits per sample, to choose the encoding - // quality for this audio track. + // Specify Bit depth, in bits per sample, to choose the encoding quality for this + // audio track. BitDepth int32 // Specify the number of channels in this output audio track. Valid values are 1 @@ -227,10 +219,9 @@ type AllowedRenditionSize struct { type AncillarySourceSettings struct { // Specify whether this set of input captions appears in your outputs in both 608 - // and 708 format. If you choose Upconvert (UPCONVERT), MediaConvert includes the - // captions data in two ways: it passes the 608 data through using the 608 - // compatibility bytes fields of the 708 wrapper, and it also translates the 608 - // data into 708. + // and 708 format. If you choose Upconvert, MediaConvert includes the captions data + // in two ways: it passes the 608 data through using the 608 compatibility bytes + // fields of the 708 wrapper, and it also translates the 608 data into 708. Convert608To708 AncillaryConvert608To708 // Specifies the 608 channel number in the ancillary data track from which to @@ -266,27 +257,22 @@ type AudioChannelTaggingSettings struct { // on the value that you choose for your audio codec. type AudioCodecSettings struct { - // Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to the - // value AAC. The service accepts one of two mutually exclusive groups of AAC - // settings--VBR and CBR. To select one of these modes, set the value of Bitrate - // control mode (rateControlMode) to "VBR" or "CBR". In VBR mode, you control the - // audio quality with the setting VBR quality (vbrQuality). In CBR mode, you use - // the setting Bitrate (bitrate). Defaults and valid values depend on the rate - // control mode. + // Required when you set Codec to the value AAC. The service accepts one of two + // mutually exclusive groups of AAC settings--VBR and CBR. To select one of these + // modes, set the value of Bitrate control mode to "VBR" or "CBR". In VBR mode, you + // control the audio quality with the setting VBR quality. In CBR mode, you use the + // setting Bitrate. Defaults and valid values depend on the rate control mode. AacSettings *AacSettings - // Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to the - // value AC3. 
+ // Required when you set Codec to the value AC3. Ac3Settings *Ac3Settings - // Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to the - // value AIFF. + // Required when you set Codec to the value AIFF. AiffSettings *AiffSettings // Choose the audio codec for this output. Note that the option Dolby Digital - // passthrough (PASSTHROUGH) applies only to Dolby Digital and Dolby Digital Plus - // audio inputs. Make sure that you choose a codec that's supported with your - // output container: + // passthrough applies only to Dolby Digital and Dolby Digital Plus audio inputs. + // Make sure that you choose a codec that's supported with your output container: // https://docs.aws.amazon.com/mediaconvert/latest/ug/reference-codecs-containers.html#reference-codecs-containers-output-audio // For audio-only outputs, make sure that both your input audio codec and your // output audio codec are supported for audio-only workflows. For more information, @@ -296,16 +282,13 @@ type AudioCodecSettings struct { // https://docs.aws.amazon.com/mediaconvert/latest/ug/reference-codecs-containers.html#audio-only-output Codec AudioCodec - // Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to the - // value EAC3_ATMOS. + // Required when you set Codec to the value EAC3_ATMOS. Eac3AtmosSettings *Eac3AtmosSettings - // Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to the - // value EAC3. + // Required when you set Codec to the value EAC3. Eac3Settings *Eac3Settings - // Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to the - // value MP2. + // Required when you set Codec to the value MP2. Mp2Settings *Mp2Settings // Required when you set Codec, under AudioDescriptions>CodecSettings, to the @@ -320,8 +303,7 @@ type AudioCodecSettings struct { // value Vorbis. VorbisSettings *VorbisSettings - // Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to the - // value WAV. + // Required when you set Codec to the value WAV. WavSettings *WavSettings noSmithyDocumentSerde @@ -378,13 +360,12 @@ type AudioDescription struct { CodecSettings *AudioCodecSettings // Specify the language for this audio output track. The service puts this - // language code into your output audio track when you set Language code control - // (AudioLanguageCodeControl) to Use configured (USE_CONFIGURED). The service also - // uses your specified custom language code when you set Language code control - // (AudioLanguageCodeControl) to Follow input (FOLLOW_INPUT), but your input file - // doesn't specify a language code. For all outputs, you can use an ISO 639-2 or - // ISO 639-3 code. For streaming outputs, you can also use any other code in the - // full RFC-5646 specification. Streaming outputs are those that are in one of the + // language code into your output audio track when you set Language code control to + // Use configured. The service also uses your specified custom language code when + // you set Language code control to Follow input, but your input file doesn't + // specify a language code. For all outputs, you can use an ISO 639-2 or ISO 639-3 + // code. For streaming outputs, you can also use any other code in the full + // RFC-5646 specification. Streaming outputs are those that are in one of the // following output groups: CMAF, DASH ISO, Apple HLS, or Microsoft Smooth // Streaming. 
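//
// For example, a sketch pinning a streaming output to a specific language tag
// (variable name is the editor's; aws.String is from the aws-sdk-go-v2 aws
// package):
//
//	desc := &types.AudioDescription{
//		LanguageCodeControl: types.AudioLanguageCodeControlUseConfigured,
//		CustomLanguageCode:  aws.String("en-US"), // RFC-5646; "eng" (ISO 639-2) works for any output
//	}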
CustomLanguageCode *string

@@ -396,11 +377,10 @@ type AudioDescription struct {
LanguageCode LanguageCode

// Specify which source for language code takes precedence for this audio track.
- // When you choose Follow input (FOLLOW_INPUT), the service uses the language code
- // from the input track if it's present. If there's no languge code on the input
- // track, the service uses the code that you specify in the setting Language code
- // (languageCode or customLanguageCode). When you choose Use configured
- // (USE_CONFIGURED), the service uses the language code that you specify.
+ // When you choose Follow input, the service uses the language code from the input
+ // track if it's present. If there's no language code on the input track, the
+ // service uses the code that you specify in the setting Language code. When you
+ // choose Use configured, the service uses the language code that you specify.
LanguageCodeControl AudioLanguageCodeControl

// Advanced audio remixing settings.
@@ -446,11 +426,11 @@ type AudioNormalizationSettings struct {
// track loudness.
PeakCalculation AudioNormalizationPeakCalculation

- // When you use Audio normalization (AudioNormalizationSettings), optionally use
- // this setting to specify a target loudness. If you don't specify a value here,
- // the encoder chooses a value for you, based on the algorithm that you choose for
- // Algorithm (algorithm). If you choose algorithm 1770-1, the encoder will choose
- // -24 LKFS; otherwise, the encoder will choose -23 LKFS.
+ // When you use Audio normalization, optionally use this setting to specify a
+ // target loudness. If you don't specify a value here, the encoder chooses a value
+ // for you, based on the algorithm that you choose for Algorithm. If you choose
+ // algorithm 1770-1, the encoder will choose -24 LKFS; otherwise, the encoder will
+ // choose -23 LKFS.
TargetLkfs float64

// Specify the True-peak limiter threshold in decibels relative to full scale
@@ -462,9 +442,8 @@ type AudioNormalizationSettings struct {
noSmithyDocumentSerde
}

-// Use Audio selectors (AudioSelectors) to specify a track or set of tracks from
-// the input that you will use in your outputs. You can use multiple Audio
-// selectors per input.
+// Use Audio selectors to specify a track or set of tracks from the input that you
+// will use in your outputs. You can use multiple Audio selectors per input.
type AudioSelector struct {

// Apply audio timing corrections to help synchronize audio and video in your
@@ -518,11 +497,9 @@ type AudioSelector struct {
// extract specific program data from the track. To select multiple programs,
// create multiple selectors with the same Track and different Program numbers. In
// the console, this setting is visible when you set Selector type to Track. Choose
- // the program number from the dropdown list. If you are sending a JSON file,
- // provide the program ID, which is part of the audio metadata. If your input file
- // has incorrect metadata, you can choose All channels instead of a program number
- // to have the service ignore the program IDs and include all the programs in the
- // track.
+ // the program number from the dropdown list. If your input file has incorrect
+ // metadata, you can choose All channels instead of a program number to have the
+ // service ignore the program IDs and include all the programs in the track.
ProgramSelection int32 // Use these settings to reorder the audio channels of one input to match those of @@ -536,18 +513,16 @@ type AudioSelector struct { // Identify a track from the input audio to include in this selector by entering // the track index number. To include several tracks in a single audio selector, // specify multiple tracks as follows. Using the console, enter a comma-separated - // list. For examle, type "1,2,3" to include tracks 1 through 3. Specifying - // directly in your JSON job file, provide the track numbers in an array. For - // example, "tracks": [1,2,3]. + // list. For example, type "1,2,3" to include tracks 1 through 3. Tracks []int32 noSmithyDocumentSerde } // Use audio selector groups to combine multiple sidecar audio inputs so that you -// can assign them to a single output audio tab (AudioDescription). Note that, if -// you're working with embedded audio, it's simpler to assign multiple input tracks -// into a single audio selector rather than use an audio selector group. +// can assign them to a single output audio tab. Note that, if you're working with +// embedded audio, it's simpler to assign multiple input tracks into a single audio +// selector rather than use an audio selector group. type AudioSelectorGroup struct { // Name of an Audio Selector within the same input to include in the group. Audio @@ -675,21 +650,21 @@ type AutomatedEncodingSettings struct { } // Settings for quality-defined variable bitrate encoding with the AV1 codec. Use -// these settings only when you set QVBR for Rate control mode (RateControlMode). +// these settings only when you set QVBR for Rate control mode. type Av1QvbrSettings struct { - // Use this setting only when you set Rate control mode (RateControlMode) to QVBR. - // Specify the target quality level for this output. MediaConvert determines the - // right number of bits to use for each part of the video to maintain the video - // quality that you specify. When you keep the default value, AUTO, MediaConvert - // picks a quality level for you, based on characteristics of your input video. If - // you prefer to specify a quality level, specify a number from 1 through 10. Use - // higher numbers for greater quality. Level 10 results in nearly lossless - // compression. The quality level for most broadcast-quality transcodes is between - // 6 and 9. Optionally, to specify a value between whole numbers, also provide a - // value for the setting qvbrQualityLevelFineTune. For example, if you want your - // QVBR quality level to be 7.33, set qvbrQualityLevel to 7 and set - // qvbrQualityLevelFineTune to .33. + // Use this setting only when you set Rate control mode to QVBR. Specify the + // target quality level for this output. MediaConvert determines the right number + // of bits to use for each part of the video to maintain the video quality that you + // specify. When you keep the default value, AUTO, MediaConvert picks a quality + // level for you, based on characteristics of your input video. If you prefer to + // specify a quality level, specify a number from 1 through 10. Use higher numbers + // for greater quality. Level 10 results in nearly lossless compression. The + // quality level for most broadcast-quality transcodes is between 6 and 9. + // Optionally, to specify a value between whole numbers, also provide a value for + // the setting qvbrQualityLevelFineTune. For example, if you want your QVBR quality + // level to be 7.33, set qvbrQualityLevel to 7 and set qvbrQualityLevelFineTune to + // .33. 
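//
// A minimal sketch of the 7.33 example above (variable name is the editor's):
//
//	qvbr := &types.Av1QvbrSettings{
//		QvbrQualityLevel:         7,
//		QvbrQualityLevelFineTune: 0.33, // effective QVBR quality level: 7 + 0.33 = 7.33
//	}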
QvbrQualityLevel int32 // Optional. Specify a value here to set the QVBR quality to a level that is @@ -708,25 +683,18 @@ type Av1QvbrSettings struct { type Av1Settings struct { // Specify the strength of any adaptive quantization filters that you enable. The - // value that you choose here applies to Spatial adaptive quantization - // (spatialAdaptiveQuantization). + // value that you choose here applies to Spatial adaptive quantization. AdaptiveQuantization Av1AdaptiveQuantization - // Specify the Bit depth (Av1BitDepth). You can choose 8-bit (BIT_8) or 10-bit - // (BIT_10). + // Specify the Bit depth. You can choose 8-bit or 10-bit. BitDepth Av1BitDepth - // If you are using the console, use the Framerate setting to specify the frame - // rate for this output. If you want to keep the same frame rate as the input - // video, choose Follow source. If you want to do frame rate conversion, choose a - // frame rate from the dropdown list or choose Custom. The framerates shown in the - // dropdown list are decimal approximations of fractions. If you choose Custom, - // specify your frame rate as a fraction. If you are creating your transcoding job - // specification as a JSON file without the console, use FramerateControl to - // specify which value the service uses for the frame rate for this output. Choose - // INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the - // input. Choose SPECIFIED if you want the service to use the frame rate you - // specify in the settings FramerateNumerator and FramerateDenominator. + // Use the Framerate setting to specify the frame rate for this output. If you + // want to keep the same frame rate as the input video, choose Follow source. If + // you want to do frame rate conversion, choose a frame rate from the dropdown list + // or choose Custom. The framerates shown in the dropdown list are decimal + // approximations of fractions. If you choose Custom, specify your frame rate as a + // fraction. FramerateControl Av1FramerateControl // Choose the method that you want MediaConvert to use when increasing or @@ -773,8 +741,7 @@ type Av1Settings struct { NumberBFramesBetweenReferenceFrames int32 // Settings for quality-defined variable bitrate encoding with the H.265 codec. - // Use these settings only when you set QVBR for Rate control mode - // (RateControlMode). + // Use these settings only when you set QVBR for Rate control mode. QvbrSettings *Av1QvbrSettings // 'With AV1 outputs, for rate control mode, MediaConvert supports only @@ -787,20 +754,20 @@ type Av1Settings struct { // or equal to half the number of macroblock rows. Slices int32 - // Keep the default value, Enabled (ENABLED), to adjust quantization within each - // frame based on spatial variation of content complexity. When you enable this - // feature, the encoder uses fewer bits on areas that can sustain more distortion - // with no noticeable visual degradation and uses more bits on areas where any - // small distortion will be noticeable. For example, complex textured blocks are - // encoded with fewer bits and smooth textured blocks are encoded with more bits. - // Enabling this feature will almost always improve your video quality. Note, - // though, that this feature doesn't take into account where the viewer's attention - // is likely to be. If viewers are likely to be focusing their attention on a part - // of the screen with a lot of complex texture, you might choose to disable this - // feature. 
Related setting: When you enable spatial adaptive quantization, set the - // value for Adaptive quantization (adaptiveQuantization) depending on your - // content. For homogeneous content, such as cartoons and video games, set it to - // Low. For content with a wider variety of textures, set it to High or Higher. + // Keep the default value, Enabled, to adjust quantization within each frame based + // on spatial variation of content complexity. When you enable this feature, the + // encoder uses fewer bits on areas that can sustain more distortion with no + // noticeable visual degradation and uses more bits on areas where any small + // distortion will be noticeable. For example, complex textured blocks are encoded + // with fewer bits and smooth textured blocks are encoded with more bits. Enabling + // this feature will almost always improve your video quality. Note, though, that + // this feature doesn't take into account where the viewer's attention is likely to + // be. If viewers are likely to be focusing their attention on a part of the screen + // with a lot of complex texture, you might choose to disable this feature. Related + // setting: When you enable spatial adaptive quantization, set the value for + // Adaptive quantization depending on your content. For homogeneous content, such + // as cartoons and video games, set it to Low. For content with a wider variety of + // textures, set it to High or Higher. SpatialAdaptiveQuantization Av1SpatialAdaptiveQuantization noSmithyDocumentSerde @@ -834,9 +801,8 @@ type AvcIntraSettings struct { // subsampling. AvcIntraClass AvcIntraClass - // Optional when you set AVC-Intra class (avcIntraClass) to Class 4K/2K - // (CLASS_4K_2K). When you set AVC-Intra class to a different value, this object - // isn't allowed. + // Optional when you set AVC-Intra class to Class 4K/2K. When you set AVC-Intra + // class to a different value, this object isn't allowed. AvcIntraUhdSettings *AvcIntraUhdSettings // If you are using the console, use the Framerate setting to specify the frame @@ -844,12 +810,7 @@ type AvcIntraSettings struct { // video, choose Follow source. If you want to do frame rate conversion, choose a // frame rate from the dropdown list or choose Custom. The framerates shown in the // dropdown list are decimal approximations of fractions. If you choose Custom, - // specify your frame rate as a fraction. If you are creating your transcoding job - // specification as a JSON file without the console, use FramerateControl to - // specify which value the service uses for the frame rate for this output. Choose - // INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the - // input. Choose SPECIFIED if you want the service to use the frame rate you - // specify in the settings FramerateNumerator and FramerateDenominator. + // specify your frame rate as a fraction. FramerateControl AvcIntraFramerateControl // Choose the method that you want MediaConvert to use when increasing or @@ -882,32 +843,29 @@ type AvcIntraSettings struct { FramerateNumerator int32 // Choose the scan line type for the output. Keep the default value, Progressive - // (PROGRESSIVE) to create a progressive output, regardless of the scan type of - // your input. Use Top field first (TOP_FIELD) or Bottom field first (BOTTOM_FIELD) - // to create an output that's interlaced with the same field polarity throughout. 
- // Use Follow, default top (FOLLOW_TOP_FIELD) or Follow, default bottom
- // (FOLLOW_BOTTOM_FIELD) to produce outputs with the same field polarity as the
- // source. For jobs that have multiple inputs, the output field polarity might
- // change over the course of the output. Follow behavior depends on the input scan
- // type. If the source is interlaced, the output will be interlaced with the same
- // polarity as the source. If the source is progressive, the output will be
- // interlaced with top field bottom field first, depending on which of the Follow
- // options you choose.
+ // to create a progressive output, regardless of the scan type of your input. Use
+ // Top field first or Bottom field first to create an output that's interlaced with
+ // the same field polarity throughout. Use Follow, default top or Follow, default
+ // bottom to produce outputs with the same field polarity as the source. For jobs
+ // that have multiple inputs, the output field polarity might change over the
+ // course of the output. Follow behavior depends on the input scan type. If the
+ // source is interlaced, the output will be interlaced with the same polarity as
+ // the source. If the source is progressive, the output will be interlaced with
+ // top field first or bottom field first, depending on which of the Follow options
+ // you choose.
InterlaceMode AvcIntraInterlaceMode

// Use this setting for interlaced outputs, when your output frame rate is half of
- // your input frame rate. In this situation, choose Optimized interlacing
- // (INTERLACED_OPTIMIZE) to create a better quality interlaced output. In this
- // case, each progressive frame from the input corresponds to an interlaced field
- // in the output. Keep the default value, Basic interlacing (INTERLACED), for all
- // other output frame rates. With basic interlacing, MediaConvert performs any
- // frame rate conversion first and then interlaces the frames. When you choose
- // Optimized interlacing and you set your output frame rate to a value that isn't
- // suitable for optimized interlacing, MediaConvert automatically falls back to
- // basic interlacing. Required settings: To use optimized interlacing, you must set
- // Telecine (telecine) to None (NONE) or Soft (SOFT). You can't use optimized
- // interlacing for hard telecine outputs. You must also set Interlace mode
- // (interlaceMode) to a value other than Progressive (PROGRESSIVE).
+ // your input frame rate. In this situation, choose Optimized interlacing to create
+ // a better quality interlaced output. In this case, each progressive frame from
+ // the input corresponds to an interlaced field in the output. Keep the default
+ // value, Basic interlacing, for all other output frame rates. With basic
+ // interlacing, MediaConvert performs any frame rate conversion first and then
+ // interlaces the frames. When you choose Optimized interlacing and you set your
+ // output frame rate to a value that isn't suitable for optimized interlacing,
+ // MediaConvert automatically falls back to basic interlacing. Required settings:
+ // To use optimized interlacing, you must set Telecine to None or Soft. You can't
+ // use optimized interlacing for hard telecine outputs. You must also set Interlace
+ // mode to a value other than Progressive.
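//
// A minimal sketch of that required combination (variable name is the
// editor's; the constant names assume this package's generated enum values
// for INTERLACED_OPTIMIZE, NONE, and TOP_FIELD):
//
//	avcIntra := &types.AvcIntraSettings{
//		ScanTypeConversionMode: types.AvcIntraScanTypeConversionModeInterlacedOptimize,
//		Telecine:               types.AvcIntraTelecineNone,
//		InterlaceMode:          types.AvcIntraInterlaceModeTopField, // anything but Progressive
//	}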
ScanTypeConversionMode AvcIntraScanTypeConversionMode // Ignore this setting unless your input frame rate is 23.976 or 24 frames per @@ -915,31 +873,29 @@ type AvcIntraSettings struct { // PAL, MediaConvert relabels the video frames to 25 fps and resamples your audio // to keep it synchronized with the video. Note that enabling this setting will // slightly reduce the duration of your video. Required settings: You must also set - // Framerate to 25. In your JSON job specification, set (framerateControl) to - // (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to 1. + // Framerate to 25. SlowPal AvcIntraSlowPal // When you do frame rate conversion from 23.976 frames per second (fps) to 29.97 // fps, and your output scan type is interlaced, you can optionally enable hard - // telecine (HARD) to create a smoother picture. When you keep the default value, - // None (NONE), MediaConvert does a standard frame rate conversion to 29.97 without - // doing anything with the field polarity to create a smoother picture. + // telecine to create a smoother picture. When you keep the default value, None, + // MediaConvert does a standard frame rate conversion to 29.97 without doing + // anything with the field polarity to create a smoother picture. Telecine AvcIntraTelecine noSmithyDocumentSerde } -// Optional when you set AVC-Intra class (avcIntraClass) to Class 4K/2K -// (CLASS_4K_2K). When you set AVC-Intra class to a different value, this object -// isn't allowed. +// Optional when you set AVC-Intra class to Class 4K/2K. When you set AVC-Intra +// class to a different value, this object isn't allowed. type AvcIntraUhdSettings struct { - // Optional. Use Quality tuning level (qualityTuningLevel) to choose how many - // transcoding passes MediaConvert does with your video. When you choose Multi-pass - // (MULTI_PASS), your video quality is better and your output bitrate is more - // accurate. That is, the actual bitrate of your output is closer to the target - // bitrate defined in the specification. When you choose Single-pass (SINGLE_PASS), - // your encoding time is faster. The default behavior is Single-pass (SINGLE_PASS). + // Optional. Use Quality tuning level to choose how many transcoding passes + // MediaConvert does with your video. When you choose Multi-pass, your video + // quality is better and your output bitrate is more accurate. That is, the actual + // bitrate of your output is closer to the target bitrate defined in the + // specification. When you choose Single-pass, your encoding time is faster. The + // default behavior is Single-pass. QualityTuningLevel AvcIntraUhdQualityTuningLevel noSmithyDocumentSerde @@ -978,8 +934,6 @@ type BandwidthReductionFilter struct { // content with the captions. Set up burn-in captions in the same output as your // video. For more information, see // https://docs.aws.amazon.com/mediaconvert/latest/ug/burn-in-output-captions.html. -// When you work directly in your JSON job specification, include this object and -// any required children when you set destinationType to BURN_IN. type BurninDestinationSettings struct { // Specify the alignment of your captions. If no explicit x_position is provided, @@ -990,58 +944,58 @@ type BurninDestinationSettings struct { // relative to those coordinates. Alignment BurninSubtitleAlignment - // Ignore this setting unless Style passthrough (StylePassthrough) is set to - // Enabled and Font color (FontColor) set to Black, Yellow, Red, Green, Blue, or - // Hex. 
Use Apply font color (ApplyFontColor) for additional font color controls.
- // When you choose White text only (WHITE_TEXT_ONLY), or leave blank, your font
- // color setting only applies to white text in your input captions. For example, if
- // your font color setting is Yellow, and your input captions have red and white
- // text, your output captions will have red and yellow text. When you choose
- // ALL_TEXT, your font color setting applies to all of your output captions text.
+ // Ignore this setting unless Style passthrough is set to Enabled and Font color
+ // is set to Black, Yellow, Red, Green, Blue, or Hex. Use Apply font color for
+ // additional font color controls. When you choose White text only, or leave blank,
+ // your font color setting only applies to white text in your input captions. For
+ // example, if your font color setting is Yellow, and your input captions have red
+ // and white text, your output captions will have red and yellow text. When you
+ // choose ALL_TEXT, your font color setting applies to all of your output captions
+ // text.
ApplyFontColor BurninSubtitleApplyFontColor

// Specify the color of the rectangle behind the captions. Leave background color
- // (BackgroundColor) blank and set Style passthrough (StylePassthrough) to enabled
- // to use the background color data from your input captions, if present.
+ // blank and set Style passthrough to enabled to use the background color data from
+ // your input captions, if present.
BackgroundColor BurninSubtitleBackgroundColor

// Specify the opacity of the background rectangle. Enter a value from 0 to 255,
- // where 0 is transparent and 255 is opaque. If Style passthrough
- // (StylePassthrough) is set to enabled, leave blank to pass through the background
- // style information in your input captions to your output captions. If Style
- // passthrough is set to disabled, leave blank to use a value of 0 and remove all
- // backgrounds from your output captions.
+ // where 0 is transparent and 255 is opaque. If Style passthrough is set to
+ // enabled, leave blank to pass through the background style information in your
+ // input captions to your output captions. If Style passthrough is set to disabled,
+ // leave blank to use a value of 0 and remove all backgrounds from your output
+ // captions.
BackgroundOpacity int32

// Specify the font that you want the service to use for your burn in captions
// when your input captions specify a font that MediaConvert doesn't support. When
- // you set Fallback font (FallbackFont) to best match (BEST_MATCH), or leave blank,
- // MediaConvert uses a supported font that most closely matches the font that your
- // input captions specify. When there are multiple unsupported fonts in your input
- // captions, MediaConvert matches each font with the supported font that matches
- // best. When you explicitly choose a replacement font, MediaConvert uses that font
- // to replace all unsupported fonts from your input.
+ // you set Fallback font to best match, or leave blank, MediaConvert uses a
+ // supported font that most closely matches the font that your input captions
+ // specify. When there are multiple unsupported fonts in your input captions,
+ // MediaConvert matches each font with the supported font that matches best. When
+ // you explicitly choose a replacement font, MediaConvert uses that font to replace
+ // all unsupported fonts from your input.
FallbackFont BurninSubtitleFallbackFont

- // Specify the color of the burned-in captions text.
Leave Font color (FontColor) - // blank and set Style passthrough (StylePassthrough) to enabled to use the font - // color data from your input captions, if present. + // Specify the color of the burned-in captions text. Leave Font color blank and + // set Style passthrough to enabled to use the font color data from your input + // captions, if present. FontColor BurninSubtitleFontColor // Specify the opacity of the burned-in captions. 255 is opaque; 0 is transparent. FontOpacity int32 - // Specify the Font resolution (FontResolution) in DPI (dots per inch). + // Specify the Font resolution in DPI (dots per inch). FontResolution int32 - // Set Font script (FontScript) to Automatically determined (AUTOMATIC), or leave - // blank, to automatically determine the font script in your input captions. - // Otherwise, set to Simplified Chinese (HANS) or Traditional Chinese (HANT) if - // your input font script uses Simplified or Traditional Chinese. + // Set Font script to Automatically determined, or leave blank, to automatically + // determine the font script in your input captions. Otherwise, set to Simplified + // Chinese (HANS) or Traditional Chinese (HANT) if your input font script uses + // Simplified or Traditional Chinese. FontScript FontScript - // Specify the Font size (FontSize) in pixels. Must be a positive integer. Set to - // 0, or leave blank, for automatic font size. + // Specify the Font size in pixels. Must be a positive integer. Set to 0, or leave + // blank, for automatic font size. FontSize int32 // Ignore this setting unless your Font color is set to Hex. Enter either six or @@ -1050,27 +1004,26 @@ type BurninDestinationSettings struct { // a green value of 0x22, a blue value of 0xAA, and an alpha value of 0xBB. HexFontColor *string - // Specify font outline color. Leave Outline color (OutlineColor) blank and set - // Style passthrough (StylePassthrough) to enabled to use the font outline color - // data from your input captions, if present. + // Specify font outline color. Leave Outline color blank and set Style passthrough + // to enabled to use the font outline color data from your input captions, if + // present. OutlineColor BurninSubtitleOutlineColor - // Specify the Outline size (OutlineSize) of the caption text, in pixels. Leave - // Outline size blank and set Style passthrough (StylePassthrough) to enabled to - // use the outline size data from your input captions, if present. + // Specify the Outline size of the caption text, in pixels. Leave Outline size + // blank and set Style passthrough to enabled to use the outline size data from + // your input captions, if present. OutlineSize int32 - // Specify the color of the shadow cast by the captions. Leave Shadow color - // (ShadowColor) blank and set Style passthrough (StylePassthrough) to enabled to - // use the shadow color data from your input captions, if present. + // Specify the color of the shadow cast by the captions. Leave Shadow color blank + // and set Style passthrough to enabled to use the shadow color data from your + // input captions, if present. ShadowColor BurninSubtitleShadowColor // Specify the opacity of the shadow. Enter a value from 0 to 255, where 0 is - // transparent and 255 is opaque. If Style passthrough (StylePassthrough) is set to - // Enabled, leave Shadow opacity (ShadowOpacity) blank to pass through the shadow - // style information in your input captions to your output captions. 
If Style - // passthrough is set to disabled, leave blank to use a value of 0 and remove all - // shadows from your output captions. + // transparent and 255 is opaque. If Style passthrough is set to Enabled, leave + // Shadow opacity blank to pass through the shadow style information in your input + // captions to your output captions. If Style passthrough is set to disabled, leave + // blank to use a value of 0 and remove all shadows from your output captions. ShadowOpacity int32 // Specify the horizontal offset of the shadow, relative to the captions in @@ -1079,39 +1032,36 @@ type BurninDestinationSettings struct { // Specify the vertical offset of the shadow relative to the captions in pixels. A // value of -2 would result in a shadow offset 2 pixels above the text. Leave - // Shadow y-offset (ShadowYOffset) blank and set Style passthrough - // (StylePassthrough) to enabled to use the shadow y-offset data from your input - // captions, if present. + // Shadow y-offset blank and set Style passthrough to enabled to use the shadow + // y-offset data from your input captions, if present. ShadowYOffset int32 - // Set Style passthrough (StylePassthrough) to ENABLED to use the available style, - // color, and position information from your input captions. MediaConvert uses - // default settings for any missing style and position information in your input - // captions. Set Style passthrough to DISABLED, or leave blank, to ignore the style - // and position information from your input captions and use default settings: - // white text with black outlining, bottom-center positioning, and automatic - // sizing. Whether you set Style passthrough to enabled or not, you can also choose - // to manually override any of the individual style and position settings. + // Set Style passthrough to ENABLED to use the available style, color, and + // position information from your input captions. MediaConvert uses default + // settings for any missing style and position information in your input captions. + // Set Style passthrough to DISABLED, or leave blank, to ignore the style and + // position information from your input captions and use default settings: white + // text with black outlining, bottom-center positioning, and automatic sizing. + // Whether you set Style passthrough to enabled or not, you can also choose to + // manually override any of the individual style and position settings. StylePassthrough BurnInSubtitleStylePassthrough - // Specify whether the text spacing (TeletextSpacing) in your captions is set by - // the captions grid, or varies depending on letter width. Choose fixed grid - // (FIXED_GRID) to conform to the spacing specified in the captions file more - // accurately. Choose proportional (PROPORTIONAL) to make the text easier to read - // for closed captions. + // Specify whether the text spacing in your captions is set by the captions grid, + // or varies depending on letter width. Choose fixed grid to conform to the spacing + // specified in the captions file more accurately. Choose proportional to make the + // text easier to read for closed captions. TeletextSpacing BurninSubtitleTeletextSpacing - // Specify the horizontal position (XPosition) of the captions, relative to the - // left side of the output in pixels. A value of 10 would result in the captions - // starting 10 pixels from the left of the output. If no explicit x_position is - // provided, the horizontal caption position will be determined by the alignment - // parameter. 
+ // Specify the horizontal position of the captions, relative to the left side of + // the output in pixels. A value of 10 would result in the captions starting 10 + // pixels from the left of the output. If no explicit x_position is provided, the + // horizontal caption position will be determined by the alignment parameter. XPosition int32 - // Specify the vertical position (YPosition) of the captions, relative to the top - // of the output in pixels. A value of 10 would result in the captions starting 10 - // pixels from the top of the output. If no explicit y_position is provided, the - // caption will be positioned towards the bottom of the output. + // Specify the vertical position of the captions, relative to the top of the + // output in pixels. A value of 10 would result in the captions starting 10 pixels + // from the top of the output. If no explicit y_position is provided, the caption + // will be positioned towards the bottom of the output. YPosition int32 noSmithyDocumentSerde @@ -1138,11 +1088,10 @@ type CaptionDescription struct { // Streaming. CustomLanguageCode *string - // Settings related to one captions tab on the MediaConvert console. In your job - // JSON, an instance of captions DestinationSettings is equivalent to one captions - // tab in the console. Usually, one captions tab corresponds to one output captions - // track. Depending on your output captions format, one tab might correspond to a - // set of output captions tracks. For more information, see + // Settings related to one captions tab on the MediaConvert console. Usually, one + // captions tab corresponds to one output captions track. Depending on your output + // captions format, one tab might correspond to a set of output captions tracks. + // For more information, see // https://docs.aws.amazon.com/mediaconvert/latest/ug/including-captions.html. DestinationSettings *CaptionDestinationSettings @@ -1177,11 +1126,10 @@ type CaptionDescriptionPreset struct { // Streaming. CustomLanguageCode *string - // Settings related to one captions tab on the MediaConvert console. In your job - // JSON, an instance of captions DestinationSettings is equivalent to one captions - // tab in the console. Usually, one captions tab corresponds to one output captions - // track. Depending on your output captions format, one tab might correspond to a - // set of output captions tracks. For more information, see + // Settings related to one captions tab on the MediaConvert console. Usually, one + // captions tab corresponds to one output captions track. Depending on your output + // captions format, one tab might correspond to a set of output captions tracks. + // For more information, see // https://docs.aws.amazon.com/mediaconvert/latest/ug/including-captions.html. DestinationSettings *CaptionDestinationSettings @@ -1202,11 +1150,10 @@ type CaptionDescriptionPreset struct { noSmithyDocumentSerde } -// Settings related to one captions tab on the MediaConvert console. In your job -// JSON, an instance of captions DestinationSettings is equivalent to one captions -// tab in the console. Usually, one captions tab corresponds to one output captions -// track. Depending on your output captions format, one tab might correspond to a -// set of output captions tracks. For more information, see +// Settings related to one captions tab on the MediaConvert console. Usually, one +// captions tab corresponds to one output captions track. 
Depending on your output +// captions format, one tab might correspond to a set of output captions tracks. +// For more information, see // https://docs.aws.amazon.com/mediaconvert/latest/ug/including-captions.html. type CaptionDestinationSettings struct { @@ -1215,8 +1162,6 @@ type CaptionDestinationSettings struct { // content with the captions. Set up burn-in captions in the same output as your // video. For more information, see // https://docs.aws.amazon.com/mediaconvert/latest/ug/burn-in-output-captions.html. - // When you work directly in your JSON job specification, include this object and - // any required children when you set destinationType to BURN_IN. BurninDestinationSettings *BurninDestinationSettings // Specify the format for this set of captions on this output. The default format @@ -1224,25 +1169,19 @@ type CaptionDestinationSettings struct { // constrains your choice of output captions format. For more information, see // https://docs.aws.amazon.com/mediaconvert/latest/ug/captions-support-tables.html. // If you are using SCTE-20 and you want to create an output that complies with the - // SCTE-43 spec, choose SCTE-20 plus embedded (SCTE20_PLUS_EMBEDDED). To create a - // non-compliant output where the embedded captions come first, choose Embedded - // plus SCTE-20 (EMBEDDED_PLUS_SCTE20). + // SCTE-43 spec, choose SCTE-20 plus embedded. To create a non-compliant output + // where the embedded captions come first, choose Embedded plus SCTE-20. DestinationType CaptionDestinationType // Settings related to DVB-Sub captions. Set up DVB-Sub captions in the same // output as your video. For more information, see // https://docs.aws.amazon.com/mediaconvert/latest/ug/dvb-sub-output-captions.html. - // When you work directly in your JSON job specification, include this object and - // any required children when you set destinationType to DVB_SUB. DvbSubDestinationSettings *DvbSubDestinationSettings // Settings related to CEA/EIA-608 and CEA/EIA-708 (also called embedded or // ancillary) captions. Set up embedded captions in the same output as your video. // For more information, see // https://docs.aws.amazon.com/mediaconvert/latest/ug/embedded-output-captions.html. - // When you work directly in your JSON job specification, include this object and - // any required children when you set destinationType to EMBEDDED, - // EMBEDDED_PLUS_SCTE20, or SCTE20_PLUS_EMBEDDED. EmbeddedDestinationSettings *EmbeddedDestinationSettings // Settings related to IMSC captions. IMSC is a sidecar format that holds captions @@ -1250,8 +1189,6 @@ type CaptionDestinationSettings struct { // the same output group, but different output from your video. For more // information, see // https://docs.aws.amazon.com/mediaconvert/latest/ug/ttml-and-webvtt-output-captions.html. - // When you work directly in your JSON job specification, include this object and - // any required children when you set destinationType to IMSC. ImscDestinationSettings *ImscDestinationSettings // Settings related to SCC captions. SCC is a sidecar format that holds captions @@ -1259,22 +1196,16 @@ type CaptionDestinationSettings struct { // the same output group, but different output from your video. For more // information, see // https://docs.aws.amazon.com/mediaconvert/latest/ug/scc-srt-output-captions.html. - // When you work directly in your JSON job specification, include this object and - // any required children when you set destinationType to SCC. 
SccDestinationSettings *SccDestinationSettings // Settings related to SRT captions. SRT is a sidecar format that holds captions // in a file that is separate from the video container. Set up sidecar captions in - // the same output group, but different output from your video. When you work - // directly in your JSON job specification, include this object and any required - // children when you set destinationType to SRT. + // the same output group, but different output from your video. SrtDestinationSettings *SrtDestinationSettings // Settings related to teletext captions. Set up teletext captions in the same // output as your video. For more information, see // https://docs.aws.amazon.com/mediaconvert/latest/ug/teletext-output-captions.html. - // When you work directly in your JSON job specification, include this object and - // any required children when you set destinationType to TELETEXT. TeletextDestinationSettings *TeletextDestinationSettings // Settings related to TTML captions. TTML is a sidecar format that holds captions @@ -1282,8 +1213,6 @@ type CaptionDestinationSettings struct { // the same output group, but different output from your video. For more // information, see // https://docs.aws.amazon.com/mediaconvert/latest/ug/ttml-and-webvtt-output-captions.html. - // When you work directly in your JSON job specification, include this object and - // any required children when you set destinationType to TTML. TtmlDestinationSettings *TtmlDestinationSettings // Settings related to WebVTT captions. WebVTT is a sidecar format that holds @@ -1291,8 +1220,6 @@ type CaptionDestinationSettings struct { // captions in the same output group, but different output from your video. For // more information, see // https://docs.aws.amazon.com/mediaconvert/latest/ug/ttml-and-webvtt-output-captions.html. - // When you work directly in your JSON job specification, include this object and - // any required children when you set destinationType to WebVTT. WebvttDestinationSettings *WebvttDestinationSettings noSmithyDocumentSerde @@ -1330,20 +1257,18 @@ type CaptionSelector struct { // Ignore this setting unless your input captions format is SCC. To have the // service compensate for differing frame rates between your input captions and // input video, specify the frame rate of the captions file. Specify this value as -// a fraction. When you work directly in your JSON job specification, use the -// settings framerateNumerator and framerateDenominator. For example, you might -// specify 24 / 1 for 24 fps, 25 / 1 for 25 fps, 24000 / 1001 for 23.976 fps, or -// 30000 / 1001 for 29.97 fps. +// a fraction. For example, you might specify 24 / 1 for 24 fps, 25 / 1 for 25 fps, +// 24000 / 1001 for 23.976 fps, or 30000 / 1001 for 29.97 fps. type CaptionSourceFramerate struct { // Specify the denominator of the fraction that represents the frame rate for the - // setting Caption source frame rate (CaptionSourceFramerate). Use this setting - // along with the setting Framerate numerator (framerateNumerator). + // setting Caption source frame rate. Use this setting along with the setting + // Framerate numerator. FramerateDenominator int32 // Specify the numerator of the fraction that represents the frame rate for the - // setting Caption source frame rate (CaptionSourceFramerate). Use this setting - // along with the setting Framerate denominator (framerateDenominator). + // setting Caption source frame rate. Use this setting along with the setting + // Framerate denominator. 
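As a quick aside on the fractional frame-rate fields in the hunk above: a minimal Go sketch of CaptionSourceFramerate follows, using only the member shapes shown in this diff (both fields are plain int32 values). It illustrates the 24000/1001 = 23.976 fps example from the doc text and is an editorial sketch, not taken from the SDK's own examples.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/mediaconvert/types"
)

func main() {
	// 24000/1001 signals 23.976 fps; whole-number rates such as 24 fps or
	// 25 fps use a denominator of 1, per the fraction examples above.
	fr := types.CaptionSourceFramerate{
		FramerateNumerator:   24000,
		FramerateDenominator: 1001,
	}
	fmt.Printf("caption source frame rate: %d/%d\n",
		fr.FramerateNumerator, fr.FramerateDenominator)
}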
FramerateNumerator int32 noSmithyDocumentSerde @@ -1369,8 +1294,8 @@ type CaptionSourceSettings struct { // FileSoureSettings. FileSourceSettings *FileSourceSettings - // Use Source (SourceType) to identify the format of your input captions. The - // service cannot auto-detect caption format. + // Use Source to identify the format of your input captions. The service cannot + // auto-detect caption format. SourceType CaptionSourceType // Settings specific to Teletext caption sources, including Page number. @@ -1395,17 +1320,17 @@ type CaptionSourceSettings struct { noSmithyDocumentSerde } -// Channel mapping (ChannelMapping) contains the group of fields that hold the -// remixing value for each channel, in dB. Specify remix values to indicate how -// much of the content from your input audio channel you want in your output audio -// channels. Each instance of the InputChannels or InputChannelsFineTune array -// specifies these values for one output channel. Use one instance of this array -// for each output channel. In the console, each array corresponds to a column in -// the graphical depiction of the mapping matrix. The rows of the graphical matrix -// correspond to input channels. Valid values are within the range from -60 (mute) -// through 6. A setting of 0 passes the input channel unchanged to the output -// channel (no attenuation or amplification). Use InputChannels or -// InputChannelsFineTune to specify your remix values. Don't use both. +// Channel mapping contains the group of fields that hold the remixing value for +// each channel, in dB. Specify remix values to indicate how much of the content +// from your input audio channel you want in your output audio channels. Each +// instance of the InputChannels or InputChannelsFineTune array specifies these +// values for one output channel. Use one instance of this array for each output +// channel. In the console, each array corresponds to a column in the graphical +// depiction of the mapping matrix. The rows of the graphical matrix correspond to +// input channels. Valid values are within the range from -60 (mute) through 6. A +// setting of 0 passes the input channel unchanged to the output channel (no +// attenuation or amplification). Use InputChannels or InputChannelsFineTune to +// specify your remix values. Don't use both. type ChannelMapping struct { // In your JSON job specification, include one child of OutputChannels for each @@ -1486,7 +1411,7 @@ type CmafEncryptionSettings struct { ConstantInitializationVector *string // Specify the encryption scheme that you want the service to use when encrypting - // your CMAF segments. Choose AES-CBC subsample (SAMPLE-AES) or AES_CTR (AES-CTR). + // your CMAF segments. Choose AES-CBC subsample or AES_CTR. EncryptionMethod CmafEncryptionType // When you use DRM with CMAF outputs, choose whether the service writes the @@ -1510,10 +1435,7 @@ type CmafEncryptionSettings struct { } // Settings related to your CMAF output package. For more information, see -// https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html. When -// you work directly in your JSON job specification, include this object and any -// required children when you set Type, under OutputGroupSettings, to -// CMAF_GROUP_SETTINGS. +// https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html. 
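Stepping back to the ChannelMapping doc comment rewritten in the hunk above, the column-per-output-channel matrix it describes is easier to see in code. A hedged sketch follows: the element type name types.OutputChannelMapping and its InputChannels []int32 member are assumptions inferred from the OutputChannels and InputChannels names in the doc text, since neither declaration appears in this diff.

package main

import "github.com/aws/aws-sdk-go-v2/service/mediaconvert/types"

func main() {
	// A 2-in/2-out passthrough remix. Each OutputChannels element is one
	// output channel (a column of the matrix); each value in InputChannels
	// is the gain, in dB, applied to one input channel (a row). Per the doc
	// text, 0 passes the channel through unchanged and -60 mutes it.
	// types.OutputChannelMapping and InputChannels []int32 are assumed
	// member shapes, not shown in this diff.
	_ = types.ChannelMapping{
		OutputChannels: []types.OutputChannelMapping{
			{InputChannels: []int32{0, -60}}, // output L: keep input L, mute input R
			{InputChannels: []int32{-60, 0}}, // output R: mute input L, keep input R
		},
	}
}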
type CmafGroupSettings struct { // By default, the service creates one top-level .m3u8 HLS manifest and one top @@ -1530,8 +1452,8 @@ type CmafGroupSettings struct { BaseUrl *string // Disable this setting only when your workflow requires the #EXT-X-ALLOW-CACHE:no - // tag. Otherwise, keep the default value Enabled (ENABLED) and control caching in - // your video distribution set up. For example, use the Cache-Control http header. + // tag. Otherwise, keep the default value Enabled and control caching in your video + // distribution set up. For example, use the Cache-Control http header. ClientCache CmafClientCache // Specification to use (RFC-6381 or the default RFC-4281) during m3u8 playlist @@ -1547,11 +1469,10 @@ type CmafGroupSettings struct { // in each AdaptationSet: Choose Distinct. DashManifestStyle DashManifestStyle - // Use Destination (Destination) to specify the S3 output location and the output - // filename base. Destination accepts format identifiers. If you do not specify the - // base filename in the URI, the service will use the filename of the input file. - // If your job has multiple inputs, the service uses the filename of the first - // input file. + // Use Destination to specify the S3 output location and the output filename base. + // Destination accepts format identifiers. If you do not specify the base filename + // in the URI, the service will use the filename of the input file. If your job has + // multiple inputs, the service uses the filename of the first input file. Destination *string // Settings associated with the destination. Will vary based on the type of @@ -1563,22 +1484,19 @@ type CmafGroupSettings struct { // Specify the length, in whole seconds, of the mp4 fragments. When you don't // specify a value, MediaConvert defaults to 2. Related setting: Use Fragment - // length control (FragmentLengthControl) to specify whether the encoder enforces - // this value strictly. + // length control to specify whether the encoder enforces this value strictly. FragmentLength int32 // Specify whether MediaConvert generates images for trick play. Keep the default - // value, None (NONE), to not generate any images. Choose Thumbnail (THUMBNAIL) to - // generate tiled thumbnails. Choose Thumbnail and full frame - // (THUMBNAIL_AND_FULLFRAME) to generate tiled thumbnails and full-resolution - // images of single frames. When you enable Write HLS manifest (WriteHlsManifest), + // value, None, to not generate any images. Choose Thumbnail to generate tiled + // thumbnails. Choose Thumbnail and full frame to generate tiled thumbnails and + // full-resolution images of single frames. When you enable Write HLS manifest, // MediaConvert creates a child manifest for each set of images that you generate // and adds corresponding entries to the parent manifest. When you enable Write - // DASH manifest (WriteDashManifest), MediaConvert adds an entry in the .mpd - // manifest for each set of images that you generate. A common application for - // these images is Roku trick mode. The thumbnails and full-frame images that - // MediaConvert creates with this feature are compatible with this Roku - // specification: + // DASH manifest, MediaConvert adds an entry in the .mpd manifest for each set of + // images that you generate. A common application for these images is Roku trick + // mode. 
The thumbnails and full-frame images that MediaConvert creates with this + // feature are compatible with this Roku specification: // https://developer.roku.com/docs/developer-program/media-playback/trick-mode/hls-and-dash.md ImageBasedTrickPlay CmafImageBasedTrickPlay @@ -1618,23 +1536,22 @@ type CmafGroupSettings struct { MpdManifestBandwidthType CmafMpdManifestBandwidthType // Specify whether your DASH profile is on-demand or main. When you choose Main - // profile (MAIN_PROFILE), the service signals - // urn:mpeg:dash:profile:isoff-main:2011 in your .mpd DASH manifest. When you - // choose On-demand (ON_DEMAND_PROFILE), the service signals + // profile, the service signals urn:mpeg:dash:profile:isoff-main:2011 in your .mpd + // DASH manifest. When you choose On-demand, the service signals // urn:mpeg:dash:profile:isoff-on-demand:2011 in your .mpd. When you choose - // On-demand, you must also set the output group setting Segment control - // (SegmentControl) to Single file (SINGLE_FILE). + // On-demand, you must also set the output group setting Segment control to Single + // file. MpdProfile CmafMpdProfile // Use this setting only when your output video stream has B-frames, which causes // the initial presentation time stamp (PTS) to be offset from the initial decode // time stamp (DTS). Specify how MediaConvert handles PTS when writing time stamps - // in output DASH manifests. Choose Match initial PTS (MATCH_INITIAL_PTS) when you - // want MediaConvert to use the initial PTS as the first time stamp in the - // manifest. Choose Zero-based (ZERO_BASED) to have MediaConvert ignore the initial - // PTS in the video stream and instead write the initial time stamp as zero in the - // manifest. For outputs that don't have B-frames, the time stamps in your DASH - // manifests start at zero regardless of your choice here. + // in output DASH manifests. Choose Match initial PTS when you want MediaConvert to + // use the initial PTS as the first time stamp in the manifest. Choose Zero-based + // to have MediaConvert ignore the initial PTS in the video stream and instead + // write the initial time stamp as zero in the manifest. For outputs that don't + // have B-frames, the time stamps in your DASH manifests start at zero regardless + // of your choice here. PtsOffsetHandlingForBFrames CmafPtsOffsetHandlingForBFrames // When set to SINGLE_FILE, a single output file is generated, which is internally @@ -1644,17 +1561,15 @@ type CmafGroupSettings struct { // Specify the length, in whole seconds, of each segment. When you don't specify a // value, MediaConvert defaults to 10. Related settings: Use Segment length control - // (SegmentLengthControl) to specify whether the encoder enforces this value - // strictly. Use Segment control (CmafSegmentControl) to specify whether - // MediaConvert creates separate segment files or one content file that has - // metadata to mark the segment boundaries. + // to specify whether the encoder enforces this value strictly. Use Segment control + // to specify whether MediaConvert creates separate segment files or one content + // file that has metadata to mark the segment boundaries. SegmentLength int32 // Specify how you want MediaConvert to determine the segment length. Choose Exact - // (EXACT) to have the encoder use the exact length that you specify with the - // setting Segment length (SegmentLength). This might result in extra I-frames. 
- // Choose Multiple of GOP (GOP_MULTIPLE) to have the encoder round up the segment - // lengths to match the next GOP boundary. + // to have the encoder use the exact length that you specify with the setting + // Segment length. This might result in extra I-frames. Choose Multiple of GOP to + // have the encoder round up the segment lengths to match the next GOP boundary. SegmentLengthControl CmafSegmentLengthControl // Include or exclude RESOLUTION attribute for video in EXT-X-STREAM-INF tag of @@ -1687,13 +1602,12 @@ type CmafGroupSettings struct { // When set to ENABLED, an Apple HLS manifest will be generated for this output. WriteHlsManifest CmafWriteHLSManifest - // When you enable Precise segment duration in DASH manifests - // (writeSegmentTimelineInRepresentation), your DASH manifest shows precise segment - // durations. The segment duration information appears inside the SegmentTimeline - // element, inside SegmentTemplate at the Representation level. When this feature - // isn't enabled, the segment durations in your DASH manifest are approximate. The - // segment duration information appears in the duration attribute of the - // SegmentTemplate element. + // When you enable Precise segment duration in DASH manifests, your DASH manifest + // shows precise segment durations. The segment duration information appears inside + // the SegmentTimeline element, inside SegmentTemplate at the Representation level. + // When this feature isn't enabled, the segment durations in your DASH manifest are + // approximate. The segment duration information appears in the duration attribute + // of the SegmentTemplate element. WriteSegmentTimelineInRepresentation CmafWriteSegmentTimelineInRepresentation noSmithyDocumentSerde @@ -1741,17 +1655,16 @@ type CmfcSettings struct { // Specify this setting only when your output will be consumed by a downstream // repackaging workflow that is sensitive to very small duration differences - // between video and audio. For this situation, choose Match video duration - // (MATCH_VIDEO_DURATION). In all other cases, keep the default value, Default - // codec duration (DEFAULT_CODEC_DURATION). When you choose Match video duration, - // MediaConvert pads the output audio streams with silence or trims them to ensure - // that the total duration of each audio stream is at least as long as the total - // duration of the video stream. After padding or trimming, the audio stream - // duration is no more than one frame longer than the video stream. MediaConvert - // applies audio padding or trimming only to the end of the last segment of the - // output. For unsegmented outputs, MediaConvert adds padding only to the end of - // the file. When you keep the default value, any minor discrepancies between audio - // and video duration will depend on your output audio codec. + // between video and audio. For this situation, choose Match video duration. In all + // other cases, keep the default value, Default codec duration. When you choose + // Match video duration, MediaConvert pads the output audio streams with silence or + // trims them to ensure that the total duration of each audio stream is at least as + // long as the total duration of the video stream. After padding or trimming, the + // audio stream duration is no more than one frame longer than the video stream. + // MediaConvert applies audio padding or trimming only to the end of the last + // segment of the output. For unsegmented outputs, MediaConvert adds padding only + // to the end of the file. 
When you keep the default value, any minor discrepancies + // between audio and video duration will depend on your output audio codec. AudioDuration CmfcAudioDuration // Specify the audio rendition group for this audio rendition. Specify up to one @@ -1762,7 +1675,7 @@ type CmfcSettings struct { // #EXT-X-MEDIA:TYPE=AUDIO,GROUP-ID="audio_aac_1". Related setting: To associate // the rendition group that this audio track belongs to with a video rendition, // include the same value that you provide here for that video output's setting - // Audio rendition sets (audioRenditionSets). + // Audio rendition sets. AudioGroupId *string // List the audio rendition groups that you want included with this video @@ -1770,12 +1683,12 @@ type CmfcSettings struct { // audio rendition groups that have the audio group IDs "audio_aac_1" and // "audio_dolby". Then you would specify this value: "audio_aac_1,audio_dolby". // Related setting: The rendition groups that you include in your comma-separated - // list should all match values that you specify in the setting Audio group ID - // (AudioGroupId) for audio renditions in the same output group as this video - // rendition. Default behavior: If you don't specify anything here and for Audio - // group ID, MediaConvert puts each audio variant in its own audio rendition group - // and associates it with every video variant. Each value in your list appears in - // your HLS parent manifest in the EXT-X-STREAM-INF tag as the value for the AUDIO + // list should all match values that you specify in the setting Audio group ID for + // audio renditions in the same output group as this video rendition. Default + // behavior: If you don't specify anything here and for Audio group ID, + // MediaConvert puts each audio variant in its own audio rendition group and + // associates it with every video variant. Each value in your list appears in your + // HLS parent manifest in the EXT-X-STREAM-INF tag as the value for the AUDIO // attribute. To continue the previous example, say that the file name for the // child manifest for your video rendition is "amazing_video_1.m3u8". Then, in your // parent manifest, each value will appear on separate lines, like this: @@ -1790,33 +1703,30 @@ type CmfcSettings struct { // the audio variant. For more information about these attributes, see the Apple // documentation article // https://developer.apple.com/documentation/http_live_streaming/example_playlists_for_http_live_streaming/adding_alternate_media_to_a_playlist. - // Choose Alternate audio, auto select, default - // (ALTERNATE_AUDIO_AUTO_SELECT_DEFAULT) to set DEFAULT=YES and AUTOSELECT=YES. - // Choose this value for only one variant in your output group. Choose Alternate - // audio, auto select, not default (ALTERNATE_AUDIO_AUTO_SELECT) to set DEFAULT=NO - // and AUTOSELECT=YES. Choose Alternate Audio, Not Auto Select to set DEFAULT=NO - // and AUTOSELECT=NO. When you don't specify a value for this setting, MediaConvert + // Choose Alternate audio, auto select, default to set DEFAULT=YES and + // AUTOSELECT=YES. Choose this value for only one variant in your output group. + // Choose Alternate audio, auto select, not default to set DEFAULT=NO and + // AUTOSELECT=YES. Choose Alternate Audio, Not Auto Select to set DEFAULT=NO and + // AUTOSELECT=NO. When you don't specify a value for this setting, MediaConvert // defaults to Alternate audio, auto select, default. When there is more than one // variant in your output group, you must explicitly choose a value for this // setting. 
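The Audio group ID / audio rendition sets pairing described above is clearer as code. In this sketch, the Go field name AudioRenditionSets is inferred from the audioRenditionSets setting named in the removed text, and the enum constant is inferred from the ALTERNATE_AUDIO_AUTO_SELECT_DEFAULT value quoted above; neither identifier is spelled out verbatim in this diff.

package main

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/mediaconvert/types"
)

func main() {
	// Audio rendition: declare the group this track belongs to, and mark it
	// DEFAULT=YES / AUTOSELECT=YES in the HLS parent manifest.
	audio := types.CmfcSettings{
		AudioGroupId:   aws.String("audio_aac_1"),
		AudioTrackType: types.CmfcAudioTrackTypeAlternateAudioAutoSelectDefault,
	}
	// Video rendition: list the matching group ID so the parent manifest
	// associates this video variant with that audio rendition group.
	video := types.CmfcSettings{
		AudioRenditionSets: aws.String("audio_aac_1"),
	}
	_, _ = audio, video
}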
AudioTrackType CmfcAudioTrackType // Specify whether to flag this audio track as descriptive video service (DVS) in - // your HLS parent manifest. When you choose Flag (FLAG), MediaConvert includes the + // your HLS parent manifest. When you choose Flag, MediaConvert includes the // parameter CHARACTERISTICS="public.accessibility.describes-video" in the - // EXT-X-MEDIA entry for this track. When you keep the default choice, Don't flag - // (DONT_FLAG), MediaConvert leaves this parameter out. The DVS flag can help with - // accessibility on Apple devices. For more information, see the Apple - // documentation. + // EXT-X-MEDIA entry for this track. When you keep the default choice, Don't flag, + // MediaConvert leaves this parameter out. The DVS flag can help with accessibility + // on Apple devices. For more information, see the Apple documentation. DescriptiveVideoServiceFlag CmfcDescriptiveVideoServiceFlag - // Choose Include (INCLUDE) to have MediaConvert generate an HLS child manifest - // that lists only the I-frames for this rendition, in addition to your regular - // manifest for this rendition. You might use this manifest as part of a workflow - // that creates preview functions for your video. MediaConvert adds both the - // I-frame only child manifest and the regular child manifest to the parent - // manifest. When you don't need the I-frame only child manifest, keep the default - // value Exclude (EXCLUDE). + // Choose Include to have MediaConvert generate an HLS child manifest that lists + // only the I-frames for this rendition, in addition to your regular manifest for + // this rendition. You might use this manifest as part of a workflow that creates + // preview functions for your video. MediaConvert adds both the I-frame only child + // manifest and the regular child manifest to the parent manifest. When you don't + // need the I-frame only child manifest, keep the default value Exclude. IFrameOnlyManifest CmfcIFrameOnlyManifest // To include key-length-value metadata in this output: Set KLV metadata insertion @@ -1833,45 +1743,44 @@ type CmfcSettings struct { // InbandEventStream element schemeIdUri will be "urn:scte:scte35:2013:bin". To // leave these elements out of your output MPD manifest, set Manifest metadata // signaling to Disabled. To enable Manifest metadata signaling, you must also set - // SCTE-35 source to Passthrough, ESAM SCTE-35 to insert, or ID3 metadata - // (TimedMetadata) to Passthrough. + // SCTE-35 source to Passthrough, ESAM SCTE-35 to insert, or ID3 metadata to + // Passthrough. ManifestMetadataSignaling CmfcManifestMetadataSignaling // Use this setting only when you specify SCTE-35 markers from ESAM. Choose INSERT // to put SCTE-35 markers in this output at the insertion points that you specify - // in an ESAM XML document. Provide the document in the setting SCC XML (sccXml). + // in an ESAM XML document. Provide the document in the setting SCC XML. Scte35Esam CmfcScte35Esam // Ignore this setting unless you have SCTE-35 markers in your input video file. - // Choose Passthrough (PASSTHROUGH) if you want SCTE-35 markers that appear in your - // input to also appear in this output. Choose None (NONE) if you don't want those - // SCTE-35 markers in this output. + // Choose Passthrough if you want SCTE-35 markers that appear in your input to also + // appear in this output. Choose None if you don't want those SCTE-35 markers in + // this output. 
Scte35Source CmfcScte35Source - // To include ID3 metadata in this output: Set ID3 metadata (timedMetadata) to - // Passthrough (PASSTHROUGH). Specify this ID3 metadata in Custom ID3 metadata - // inserter (timedMetadataInsertion). MediaConvert writes each instance of ID3 - // metadata in a separate Event Message (eMSG) box. To exclude this ID3 metadata: - // Set ID3 metadata to None (NONE) or leave blank. + // To include ID3 metadata in this output: Set ID3 metadata to Passthrough. + // Specify this ID3 metadata in Custom ID3 metadata inserter. MediaConvert writes + // each instance of ID3 metadata in a separate Event Message (eMSG) box. To exclude + // this ID3 metadata: Set ID3 metadata to None or leave blank. TimedMetadata CmfcTimedMetadata // Specify the event message box (eMSG) version for ID3 timed metadata in your // output. For more information, see ISO/IEC 23009-1:2022 section 5.10.3.3.3 // Syntax. Leave blank to use the default value Version 0. When you specify Version - // 1, you must also set ID3 metadata (timedMetadata) to Passthrough. + // 1, you must also set ID3 metadata to Passthrough. TimedMetadataBoxVersion CmfcTimedMetadataBoxVersion - // Specify the event message box (eMSG) scheme ID URI (scheme_id_uri) for ID3 - // timed metadata in your output. For more information, see ISO/IEC 23009-1:2022 - // section 5.10.3.3.4 Semantics. Leave blank to use the default value: - // https://aomedia.org/emsg/ID3 When you specify a value for ID3 metadata scheme ID - // URI, you must also set ID3 metadata (timedMetadata) to Passthrough. + // Specify the event message box (eMSG) scheme ID URI for ID3 timed metadata in + // your output. For more information, see ISO/IEC 23009-1:2022 section 5.10.3.3.4 + // Semantics. Leave blank to use the default value: https://aomedia.org/emsg/ID3 + // When you specify a value for ID3 metadata scheme ID URI, you must also set ID3 + // metadata to Passthrough. TimedMetadataSchemeIdUri *string // Specify the event message box (eMSG) value for ID3 timed metadata in your // output. For more information, see ISO/IEC 23009-1:2022 section 5.10.3.3.4 // Semantics. When you specify a value for ID3 Metadata Value, you must also set - // ID3 metadata (timedMetadata) to Passthrough. + // ID3 metadata to Passthrough. TimedMetadataValue *string noSmithyDocumentSerde @@ -1910,12 -1819,11 @@ type ColorCorrector struct { // signaled in the output. These values don't affect the pixel values that are // encoded in the video stream. They are intended to help the downstream video // player display content in a way that reflects the intentions of the content - // creator. When you set Color space conversion (ColorSpaceConversion) to HDR 10 - // (FORCE_HDR10), these settings are required. You must set values for Max frame - // average light level (maxFrameAverageLightLevel) and Max content light level - // (maxContentLightLevel); these settings don't have a default value. The default - // values for the other HDR 10 metadata settings are defined by the P3D65 color - // space. For more information about MediaConvert HDR jobs, see + // creator. When you set Color space conversion to HDR 10, these settings are + // required. You must set values for Max frame average light level and Max content + // light level; these settings don't have a default value. The default values for + // the other HDR 10 metadata settings are defined by the P3D65 color space. For + // more information about MediaConvert HDR jobs, see // https://docs.aws.amazon.com/console/mediaconvert/hdr.
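Since the HDR 10 doc comment above carries a hard requirement (the two light-level values have no defaults), a small sketch of a conforming ColorCorrector may help. The constant name ColorSpaceConversionForceHdr10 is an assumption derived from the FORCE_HDR10 value named in the removed text; the int32 value fields match the shapes used throughout this diff.

package main

import "github.com/aws/aws-sdk-go-v2/service/mediaconvert/types"

func main() {
	// Forcing HDR 10 output requires both light-level values; per the doc
	// text they have no defaults, while the other HDR 10 metadata settings
	// fall back to P3D65-derived defaults.
	_ = types.ColorCorrector{
		ColorSpaceConversion: types.ColorSpaceConversionForceHdr10, // assumed constant name
		Hdr10Metadata: &types.Hdr10Metadata{
			MaxContentLightLevel:      1000, // required, in nits
			MaxFrameAverageLightLevel: 400,  // required, in nits
		},
	}
}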
Hdr10Metadata *Hdr10Metadata @@ -1982,15 +1890,14 @@ type ContainerSettings struct { F4vSettings *F4vSettings // MPEG-2 TS container settings. These apply to outputs in a File output group - // when the output's container (ContainerType) is MPEG-2 Transport Stream (M2TS). - // In these assets, data is organized by the program map table (PMT). Each - // transport stream program contains subsets of data, including audio, video, and - // metadata. Each of these subsets of data has a numerical label called a packet - // identifier (PID). Each transport stream program corresponds to one MediaConvert - // output. The PMT lists the types of data in a program along with their PID. - // Downstream systems and players use the program map table to look up the PID for - // each type of data it accesses and then uses the PIDs to locate specific data - // within the asset. + // when the output's container is MPEG-2 Transport Stream (M2TS). In these assets, + // data is organized by the program map table (PMT). Each transport stream program + // contains subsets of data, including audio, video, and metadata. Each of these + // subsets of data has a numerical label called a packet identifier (PID). Each + // transport stream program corresponds to one MediaConvert output. The PMT lists + // the types of data in a program along with their PID. Downstream systems and + // players use the program map table to look up the PID for each type of data it + // accesses and then uses the PIDs to locate specific data within the asset. M2tsSettings *M2tsSettings // These settings relate to the MPEG-2 transport stream (MPEG2-TS) container for @@ -2039,10 +1946,10 @@ type DashIsoEncryptionSettings struct { // This setting can improve the compatibility of your output with video players on // obsolete devices. It applies only to DASH H.264 outputs with DRM encryption. - // Choose Unencrypted SEI (UNENCRYPTED_SEI) only to correct problems with playback - // on older devices. Otherwise, keep the default setting CENC v1 (CENC_V1). If you - // choose Unencrypted SEI, for that output, the service will exclude the access - // unit delimiter and will leave the SEI NAL units unencrypted. + // Choose Unencrypted SEI only to correct problems with playback on older devices. + // Otherwise, keep the default setting CENC v1. If you choose Unencrypted SEI, for + // that output, the service will exclude the access unit delimiter and will leave + // the SEI NAL units unencrypted. PlaybackDeviceCompatibility DashIsoPlaybackDeviceCompatibility // If your output group type is HLS, DASH, or Microsoft Smooth, use these settings @@ -2054,10 +1961,7 @@ type DashIsoEncryptionSettings struct { } // Settings related to your DASH output package. For more information, see -// https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html. When -// you work directly in your JSON job specification, include this object and any -// required children when you set Type, under OutputGroupSettings, to -// DASH_ISO_GROUP_SETTINGS. +// https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html. type DashIsoGroupSettings struct { // By default, the service creates one .mpd DASH manifest for each DASH ISO output @@ -2071,10 +1975,9 @@ type DashIsoGroupSettings struct { // Dolby channel configuration tag, rather than the MPEG one. For example, you // might need to use this to make dynamic ad insertion work. Specify which audio // channel configuration scheme ID URI MediaConvert writes in your DASH manifest. 
- // Keep the default value, MPEG channel configuration (MPEG_CHANNEL_CONFIGURATION), - // to have MediaConvert write this: urn:mpeg:mpegB:cicp:ChannelConfiguration. - // Choose Dolby channel configuration (DOLBY_CHANNEL_CONFIGURATION) to have - // MediaConvert write this instead: + // Keep the default value, MPEG channel configuration, to have MediaConvert write + // this: urn:mpeg:mpegB:cicp:ChannelConfiguration. Choose Dolby channel + // configuration to have MediaConvert write this instead: // tag:dolby.com,2014:dash:audio_channel_configuration:2011. AudioChannelConfigSchemeIdUri DashIsoGroupAudioChannelConfigSchemeIdUri @@ -2092,11 +1995,10 @@ type DashIsoGroupSettings struct { // in each AdaptationSet: Choose Distinct. DashManifestStyle DashManifestStyle - // Use Destination (Destination) to specify the S3 output location and the output - // filename base. Destination accepts format identifiers. If you do not specify the - // base filename in the URI, the service will use the filename of the input file. - // If your job has multiple inputs, the service uses the filename of the first - // input file. + // Use Destination to specify the S3 output location and the output filename base. + // Destination accepts format identifiers. If you do not specify the base filename + // in the URI, the service will use the filename of the input file. If your job has + // multiple inputs, the service uses the filename of the first input file. Destination *string // Settings associated with the destination. Will vary based on the type of @@ -2118,13 +2020,13 @@ type DashIsoGroupSettings struct { HbbtvCompliance DashIsoHbbtvCompliance // Specify whether MediaConvert generates images for trick play. Keep the default - // value, None (NONE), to not generate any images. Choose Thumbnail (THUMBNAIL) to - // generate tiled thumbnails. Choose Thumbnail and full frame - // (THUMBNAIL_AND_FULLFRAME) to generate tiled thumbnails and full-resolution - // images of single frames. MediaConvert adds an entry in the .mpd manifest for - // each set of images that you generate. A common application for these images is - // Roku trick mode. The thumbnails and full-frame images that MediaConvert creates - // with this feature are compatible with this Roku specification: + // value, None, to not generate any images. Choose Thumbnail to generate tiled + // thumbnails. Choose Thumbnail and full frame to generate tiled thumbnails and + // full-resolution images of single frames. MediaConvert adds an entry in the .mpd + // manifest for each set of images that you generate. A common application for + // these images is Roku trick mode. The thumbnails and full-frame images that + // MediaConvert creates with this feature are compatible with this Roku + // specification: // https://developer.roku.com/docs/developer-program/media-playback/trick-mode/hls-and-dash.md ImageBasedTrickPlay DashIsoImageBasedTrickPlay @@ -2157,23 +2059,22 @@ type DashIsoGroupSettings struct { MpdManifestBandwidthType DashIsoMpdManifestBandwidthType // Specify whether your DASH profile is on-demand or main. When you choose Main - // profile (MAIN_PROFILE), the service signals - // urn:mpeg:dash:profile:isoff-main:2011 in your .mpd DASH manifest. When you - // choose On-demand (ON_DEMAND_PROFILE), the service signals + // profile, the service signals urn:mpeg:dash:profile:isoff-main:2011 in your .mpd + // DASH manifest. When you choose On-demand, the service signals // urn:mpeg:dash:profile:isoff-on-demand:2011 in your .mpd. 
When you choose - // On-demand, you must also set the output group setting Segment control - // (SegmentControl) to Single file (SINGLE_FILE). + // On-demand, you must also set the output group setting Segment control to Single + // file. MpdProfile DashIsoMpdProfile // Use this setting only when your output video stream has B-frames, which causes // the initial presentation time stamp (PTS) to be offset from the initial decode // time stamp (DTS). Specify how MediaConvert handles PTS when writing time stamps - // in output DASH manifests. Choose Match initial PTS (MATCH_INITIAL_PTS) when you - // want MediaConvert to use the initial PTS as the first time stamp in the - // manifest. Choose Zero-based (ZERO_BASED) to have MediaConvert ignore the initial - // PTS in the video stream and instead write the initial time stamp as zero in the - // manifest. For outputs that don't have B-frames, the time stamps in your DASH - // manifests start at zero regardless of your choice here. + // in output DASH manifests. Choose Match initial PTS when you want MediaConvert to + // use the initial PTS as the first time stamp in the manifest. Choose Zero-based + // to have MediaConvert ignore the initial PTS in the video stream and instead + // write the initial time stamp as zero in the manifest. For outputs that don't + // have B-frames, the time stamps in your DASH manifests start at zero regardless + // of your choice here. PtsOffsetHandlingForBFrames DashIsoPtsOffsetHandlingForBFrames // When set to SINGLE_FILE, a single output file is generated, which is internally @@ -2183,17 +2084,15 @@ type DashIsoGroupSettings struct { // Specify the length, in whole seconds, of each segment. When you don't specify a // value, MediaConvert defaults to 30. Related settings: Use Segment length control - // (SegmentLengthControl) to specify whether the encoder enforces this value - // strictly. Use Segment control (DashIsoSegmentControl) to specify whether - // MediaConvert creates separate segment files or one content file that has - // metadata to mark the segment boundaries. + // to specify whether the encoder enforces this value strictly. Use Segment control + // to specify whether MediaConvert creates separate segment files or one content + // file that has metadata to mark the segment boundaries. SegmentLength int32 // Specify how you want MediaConvert to determine the segment length. Choose Exact - // (EXACT) to have the encoder use the exact length that you specify with the - // setting Segment length (SegmentLength). This might result in extra I-frames. - // Choose Multiple of GOP (GOP_MULTIPLE) to have the encoder round up the segment - // lengths to match the next GOP boundary. + // to have the encoder use the exact length that you specify with the setting + // Segment length. This might result in extra I-frames. Choose Multiple of GOP to + // have the encoder round up the segment lengths to match the next GOP boundary. SegmentLengthControl DashIsoSegmentLengthControl // Specify the video sample composition time offset mode in the output fMP4 TRUN @@ -2273,10 +2172,11 @@ type Deinterlacer struct { // probably result in lower quality video. Control DeinterlacerControl - // Use Deinterlacer (DeinterlaceMode) to choose how the service will do - // deinterlacing. Default is Deinterlace. - Deinterlace converts interlaced to - // progressive. - Inverse telecine converts Hard Telecine 29.97i to progressive - // 23.976p. - Adaptive auto-detects and converts to progressive. 
+ // Use Deinterlacer to choose how the service will do deinterlacing. Default is + // Deinterlace. + // - Deinterlace converts interlaced to progressive. + // - Inverse telecine converts Hard Telecine 29.97i to progressive 23.976p. + // - Adaptive auto-detects and converts to progressive. Mode DeinterlacerMode noSmithyDocumentSerde @@ -2340,9 +2240,7 @@ type DolbyVisionLevel6Metadata struct { } // Use these settings to insert a DVB Network Information Table (NIT) in the -// transport stream of this output. When you work directly in your JSON job -// specification, include this object only when your job has a transport stream -// output and the container settings contain the object M2tsSettings. +// transport stream of this output. type DvbNitSettings struct { // The numeric value placed in the Network Information Table (NIT). @@ -2360,9 +2258,7 @@ type DvbNitSettings struct { } // Use these settings to insert a DVB Service Description Table (SDT) in the -// transport stream of this output. When you work directly in your JSON job -// specification, include this object only when your job has a transport stream -// output and the container settings contain the object M2tsSettings. +// transport stream of this output. type DvbSdtSettings struct { // Selects method of inserting SDT information into output stream. "Follow input @@ -2391,8 +2287,6 @@ type DvbSdtSettings struct { // Settings related to DVB-Sub captions. Set up DVB-Sub captions in the same // output as your video. For more information, see // https://docs.aws.amazon.com/mediaconvert/latest/ug/dvb-sub-output-captions.html. -// When you work directly in your JSON job specification, include this object and -// any required children when you set destinationType to DVB_SUB. type DvbSubDestinationSettings struct { // Specify the alignment of your captions. If no explicit x_position is provided, @@ -2404,28 +2298,28 @@ type DvbSubDestinationSettings struct { // settings must be identical. Alignment DvbSubtitleAlignment - // Ignore this setting unless Style Passthrough (StylePassthrough) is set to - // Enabled and Font color (FontColor) set to Black, Yellow, Red, Green, Blue, or - // Hex. Use Apply font color (ApplyFontColor) for additional font color controls. - // When you choose White text only (WHITE_TEXT_ONLY), or leave blank, your font - // color setting only applies to white text in your input captions. For example, if - // your font color setting is Yellow, and your input captions have red and white - // text, your output captions will have red and yellow text. When you choose - // ALL_TEXT, your font color setting applies to all of your output captions text. + // Ignore this setting unless Style Passthrough is set to Enabled and Font color + // set to Black, Yellow, Red, Green, Blue, or Hex. Use Apply font color for + // additional font color controls. When you choose White text only, or leave blank, + // your font color setting only applies to white text in your input captions. For + // example, if your font color setting is Yellow, and your input captions have red + // and white text, your output captions will have red and yellow text. When you + // choose ALL_TEXT, your font color setting applies to all of your output captions + // text. ApplyFontColor DvbSubtitleApplyFontColor // Specify the color of the rectangle behind the captions. Leave background color - // (BackgroundColor) blank and set Style passthrough (StylePassthrough) to enabled - // to use the background color data from your input captions, if present. 
+ // blank and set Style passthrough to enabled to use the background color data from + // your input captions, if present. BackgroundColor DvbSubtitleBackgroundColor // Specify the opacity of the background rectangle. Enter a value from 0 to 255, - // where 0 is transparent and 255 is opaque. If Style passthrough - // (StylePassthrough) is set to enabled, leave blank to pass through the background - // style information in your input captions to your output captions. If Style - // passthrough is set to disabled, leave blank to use a value of 0 and remove all - // backgrounds from your output captions. Within your job settings, all of your - // DVB-Sub settings must be identical. + // where 0 is transparent and 255 is opaque. If Style passthrough is set to + // enabled, leave blank to pass through the background style information in your + // input captions to your output captions. If Style passthrough is set to disabled, + // leave blank to use a value of 0 and remove all backgrounds from your output + // captions. Within your job settings, all of your DVB-Sub settings must be + // identical. BackgroundOpacity int32 // Specify how MediaConvert handles the display definition segment (DDS). To @@ -2439,68 +2333,67 @@ type DvbSubDestinationSettings struct { // you choose for DDS handling. All burn-in and DVB-Sub font settings must match. DdsHandling DvbddsHandling - // Use this setting, along with DDS y-coordinate (ddsYCoordinate), to specify the - // upper left corner of the display definition segment (DDS) display window. With - // this setting, specify the distance, in pixels, between the left side of the - // frame and the left side of the DDS display window. Keep the default value, 0, to - // have MediaConvert automatically choose this offset. Related setting: When you - // use this setting, you must set DDS handling (ddsHandling) to a value other than - // None (NONE). MediaConvert uses these values to determine whether to write page - // position data to the DDS or to the page composition segment (PCS). All burn-in - // and DVB-Sub font settings must match. + // Use this setting, along with DDS y-coordinate, to specify the upper left corner + // of the display definition segment (DDS) display window. With this setting, + // specify the distance, in pixels, between the left side of the frame and the left + // side of the DDS display window. Keep the default value, 0, to have MediaConvert + // automatically choose this offset. Related setting: When you use this setting, + // you must set DDS handling to a value other than None. MediaConvert uses these + // values to determine whether to write page position data to the DDS or to the + // page composition segment. All burn-in and DVB-Sub font settings must match. DdsXCoordinate int32 - // Use this setting, along with DDS x-coordinate (ddsXCoordinate), to specify the - // upper left corner of the display definition segment (DDS) display window. With - // this setting, specify the distance, in pixels, between the top of the frame and - // the top of the DDS display window. Keep the default value, 0, to have - // MediaConvert automatically choose this offset. Related setting: When you use - // this setting, you must set DDS handling (ddsHandling) to a value other than None - // (NONE). MediaConvert uses these values to determine whether to write page - // position data to the DDS or to the page composition segment (PCS). All burn-in - // and DVB-Sub font settings must match. 
+ // Use this setting, along with DDS x-coordinate, to specify the upper left corner + // of the display definition segment (DDS) display window. With this setting, + // specify the distance, in pixels, between the top of the frame and the top of the + // DDS display window. Keep the default value, 0, to have MediaConvert + // automatically choose this offset. Related setting: When you use this setting, + // you must set DDS handling to a value other than None. MediaConvert uses these + // values to determine whether to write page position data to the DDS or to the + // page composition segment (PCS). All burn-in and DVB-Sub font settings must + // match. DdsYCoordinate int32 // Specify the font that you want the service to use for your burn in captions // when your input captions specify a font that MediaConvert doesn't support. When - // you set Fallback font (FallbackFont) to best match (BEST_MATCH), or leave blank, - // MediaConvert uses a supported font that most closely matches the font that your - // input captions specify. When there are multiple unsupported fonts in your input - // captions, MediaConvert matches each font with the supported font that matches - // best. When you explicitly choose a replacement font, MediaConvert uses that font - // to replace all unsupported fonts from your input. + // you set Fallback font to best match, or leave blank, MediaConvert uses a + // supported font that most closely matches the font that your input captions + // specify. When there are multiple unsupported fonts in your input captions, + // MediaConvert matches each font with the supported font that matches best. When + // you explicitly choose a replacement font, MediaConvert uses that font to replace + // all unsupported fonts from your input. FallbackFont DvbSubSubtitleFallbackFont - // Specify the color of the captions text. Leave Font color (FontColor) blank and - // set Style passthrough (StylePassthrough) to enabled to use the font color data - // from your input captions, if present. Within your job settings, all of your - // DVB-Sub settings must be identical. + // Specify the color of the captions text. Leave Font color blank and set Style + // passthrough to enabled to use the font color data from your input captions, if + // present. Within your job settings, all of your DVB-Sub settings must be + // identical. FontColor DvbSubtitleFontColor // Specify the opacity of the burned-in captions. 255 is opaque; 0 is transparent. // Within your job settings, all of your DVB-Sub settings must be identical. FontOpacity int32 - // Specify the Font resolution (FontResolution) in DPI (dots per inch). Within - // your job settings, all of your DVB-Sub settings must be identical. + // Specify the Font resolution in DPI (dots per inch). Within your job settings, + // all of your DVB-Sub settings must be identical. FontResolution int32 - // Set Font script (FontScript) to Automatically determined (AUTOMATIC), or leave - // blank, to automatically determine the font script in your input captions. - // Otherwise, set to Simplified Chinese (HANS) or Traditional Chinese (HANT) if - // your input font script uses Simplified or Traditional Chinese. Within your job - // settings, all of your DVB-Sub settings must be identical. + // Set Font script to Automatically determined, or leave blank, to automatically + // determine the font script in your input captions. 
Otherwise, set to Simplified + // Chinese (HANS) or Traditional Chinese (HANT) if your input font script uses + // Simplified or Traditional Chinese. Within your job settings, all of your DVB-Sub + // settings must be identical. FontScript FontScript - // Specify the Font size (FontSize) in pixels. Must be a positive integer. Set to - // 0, or leave blank, for automatic font size. Within your job settings, all of - // your DVB-Sub settings must be identical. + // Specify the Font size in pixels. Must be a positive integer. Set to 0, or leave + // blank, for automatic font size. Within your job settings, all of your DVB-Sub + // settings must be identical. FontSize int32 // Specify the height, in pixels, of this set of DVB-Sub captions. The default // value is 576 pixels. Related setting: When you use this setting, you must set - // DDS handling (ddsHandling) to a value other than None (NONE). All burn-in and - // DVB-Sub font settings must match. + // DDS handling to a value other than None. All burn-in and DVB-Sub font settings + // must match. Height int32 // Ignore this setting unless your Font color is set to Hex. Enter either six or @@ -2509,31 +2402,30 @@ type DvbSubDestinationSettings struct { // a green value of 0x22, a blue value of 0xAA, and an alpha value of 0xBB. HexFontColor *string - // Specify font outline color. Leave Outline color (OutlineColor) blank and set - // Style passthrough (StylePassthrough) to enabled to use the font outline color - // data from your input captions, if present. Within your job settings, all of your - // DVB-Sub settings must be identical. + // Specify font outline color. Leave Outline color blank and set Style passthrough + // to enabled to use the font outline color data from your input captions, if + // present. Within your job settings, all of your DVB-Sub settings must be + // identical. OutlineColor DvbSubtitleOutlineColor - // Specify the Outline size (OutlineSize) of the caption text, in pixels. Leave - // Outline size blank and set Style passthrough (StylePassthrough) to enabled to - // use the outline size data from your input captions, if present. Within your job - // settings, all of your DVB-Sub settings must be identical. + // Specify the Outline size of the caption text, in pixels. Leave Outline size + // blank and set Style passthrough to enabled to use the outline size data from + // your input captions, if present. Within your job settings, all of your DVB-Sub + // settings must be identical. OutlineSize int32 - // Specify the color of the shadow cast by the captions. Leave Shadow color - // (ShadowColor) blank and set Style passthrough (StylePassthrough) to enabled to - // use the shadow color data from your input captions, if present. Within your job - // settings, all of your DVB-Sub settings must be identical. + // Specify the color of the shadow cast by the captions. Leave Shadow color blank + // and set Style passthrough to enabled to use the shadow color data from your + // input captions, if present. Within your job settings, all of your DVB-Sub + // settings must be identical. ShadowColor DvbSubtitleShadowColor // Specify the opacity of the shadow. Enter a value from 0 to 255, where 0 is - // transparent and 255 is opaque. If Style passthrough (StylePassthrough) is set to - // Enabled, leave Shadow opacity (ShadowOpacity) blank to pass through the shadow - // style information in your input captions to your output captions. 
If Style - // passthrough is set to disabled, leave blank to use a value of 0 and remove all - // shadows from your output captions. Within your job settings, all of your DVB-Sub - // settings must be identical. + // transparent and 255 is opaque. If Style passthrough is set to Enabled, leave + // Shadow opacity blank to pass through the shadow style information in your input + // captions to your output captions. If Style passthrough is set to disabled, leave + // blank to use a value of 0 and remove all shadows from your output captions. + // Within your job settings, all of your DVB-Sub settings must be identical. ShadowOpacity int32 // Specify the horizontal offset of the shadow, relative to the captions in @@ -2543,20 +2435,19 @@ type DvbSubDestinationSettings struct { // Specify the vertical offset of the shadow relative to the captions in pixels. A // value of -2 would result in a shadow offset 2 pixels above the text. Leave - // Shadow y-offset (ShadowYOffset) blank and set Style passthrough - // (StylePassthrough) to enabled to use the shadow y-offset data from your input - // captions, if present. Within your job settings, all of your DVB-Sub settings - // must be identical. + // Shadow y-offset blank and set Style passthrough to enabled to use the shadow + // y-offset data from your input captions, if present. Within your job settings, + // all of your DVB-Sub settings must be identical. ShadowYOffset int32 - // Set Style passthrough (StylePassthrough) to ENABLED to use the available style, - // color, and position information from your input captions. MediaConvert uses - // default settings for any missing style and position information in your input - // captions. Set Style passthrough to DISABLED, or leave blank, to ignore the style - // and position information from your input captions and use default settings: - // white text with black outlining, bottom-center positioning, and automatic - // sizing. Whether you set Style passthrough to enabled or not, you can also choose - // to manually override any of the individual style and position settings. + // Set Style passthrough to ENABLED to use the available style, color, and + // position information from your input captions. MediaConvert uses default + // settings for any missing style and position information in your input captions. + // Set Style passthrough to DISABLED, or leave blank, to ignore the style and + // position information from your input captions and use default settings: white + // text with black outlining, bottom-center positioning, and automatic sizing. + // Whether you set Style passthrough to enabled or not, you can also choose to + // manually override any of the individual style and position settings. StylePassthrough DvbSubtitleStylePassthrough // Specify whether your DVB subtitles are standard or for hearing impaired. Choose @@ -2564,33 +2455,31 @@ type DvbSubDestinationSettings struct { // Choose standard if your subtitles include only dialogue. SubtitlingType DvbSubtitlingType - // Specify whether the Text spacing (TeletextSpacing) in your captions is set by - // the captions grid, or varies depending on letter width. Choose fixed grid - // (FIXED_GRID) to conform to the spacing specified in the captions file more - // accurately. Choose proportional (PROPORTIONAL) to make the text easier to read - // for closed captions. Within your job settings, all of your DVB-Sub settings must - // be identical. 
+ // Specify whether the Text spacing in your captions is set by the captions grid, + // or varies depending on letter width. Choose fixed grid to conform to the spacing + // specified in the captions file more accurately. Choose proportional to make the + // text easier to read for closed captions. Within your job settings, all of your + // DVB-Sub settings must be identical. TeletextSpacing DvbSubtitleTeletextSpacing // Specify the width, in pixels, of this set of DVB-Sub captions. The default // value is 720 pixels. Related setting: When you use this setting, you must set - // DDS handling (ddsHandling) to a value other than None (NONE). All burn-in and - // DVB-Sub font settings must match. + // DDS handling to a value other than None. All burn-in and DVB-Sub font settings + // must match. Width int32 - // Specify the horizontal position (XPosition) of the captions, relative to the - // left side of the outputin pixels. A value of 10 would result in the captions - // starting 10 pixels from the left ofthe output. If no explicit x_position is - // provided, the horizontal caption position will bedetermined by the alignment - // parameter. Within your job settings, all of your DVB-Sub settings must be - // identical. + // Specify the horizontal position of the captions, relative to the left side of + // the output in pixels. A value of 10 would result in the captions starting 10 + // pixels from the left of the output. If no explicit x_position is provided, the + // horizontal caption position will be determined by the alignment parameter. + // Within your job settings, all of your DVB-Sub settings must be identical. XPosition int32 - // Specify the vertical position (YPosition) of the captions, relative to the top - // of the output in pixels. A value of 10 would result in the captions starting 10 - // pixels from the top of the output. If no explicit y_position is provided, the - // caption will be positioned towards the bottom of the output. Within your job - // settings, all of your DVB-Sub settings must be identical. + // Specify the vertical position of the captions, relative to the top of the + // output in pixels. A value of 10 would result in the captions starting 10 pixels + // from the top of the output. If no explicit y_position is provided, the caption + // will be positioned towards the bottom of the output. Within your job settings, + // all of your DVB-Sub settings must be identical. YPosition int32 noSmithyDocumentSerde @@ -2608,9 +2497,7 @@ type DvbSubSourceSettings struct { } // Use these settings to insert a DVB Time and Date Table (TDT) in the transport -// stream of this output. When you work directly in your JSON job specification, -// include this object only when your job has a transport stream output and the -// container settings contain the object M2tsSettings. +// stream of this output. type DvbTdtSettings struct { // The number of milliseconds between instances of this table in the output @@ -2620,8 +2507,7 @@ type DvbTdtSettings struct { noSmithyDocumentSerde } -// Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to the -// value EAC3_ATMOS. +// Required when you set Codec to the value EAC3_ATMOS. type Eac3AtmosSettings struct { // Specify the average bitrate for this output in bits per second. Valid values: @@ -2643,95 +2529,79 @@ type Eac3AtmosSettings struct { DialogueIntelligence Eac3AtmosDialogueIntelligence // Specify whether MediaConvert should use any downmix metadata from your input - // file. 
Keep the default value, Custom (SPECIFIED) to provide downmix values in - // your job settings. Choose Follow source (INITIALIZE_FROM_SOURCE) to use the - // metadata from your input. Related settings--Use these settings to specify your - // downmix values: Left only/Right only surround (LoRoSurroundMixLevel), Left - // total/Right total surround (LtRtSurroundMixLevel), Left total/Right total center - // (LtRtCenterMixLevel), Left only/Right only center (LoRoCenterMixLevel), and - // Stereo downmix (StereoDownmix). When you keep Custom (SPECIFIED) for Downmix - // control (DownmixControl) and you don't specify values for the related settings, + // file. Keep the default value, Custom to provide downmix values in your job + // settings. Choose Follow source to use the metadata from your input. Related + // settings--Use these settings to specify your downmix values: Left only/Right + // only surround, Left total/Right total surround, Left total/Right total center, + // Left only/Right only center, and Stereo downmix. When you keep Custom for + // Downmix control and you don't specify values for the related settings, // MediaConvert uses default values for those settings. DownmixControl Eac3AtmosDownmixControl // Choose the Dolby dynamic range control (DRC) profile that MediaConvert uses // when encoding the metadata in the Dolby stream for the line operating mode. - // Default value: Film light (ATMOS_STORAGE_DDP_COMPR_FILM_LIGHT) Related setting: - // To have MediaConvert use the value you specify here, keep the default value, - // Custom (SPECIFIED) for the setting Dynamic range control (DynamicRangeControl). - // Otherwise, MediaConvert ignores Dynamic range compression line - // (DynamicRangeCompressionLine). For information about the Dolby DRC operating - // modes and profiles, see the Dynamic Range Control chapter of the Dolby Metadata - // Guide at + // Default value: Film light Related setting: To have MediaConvert use the value + // you specify here, keep the default value, Custom for the setting Dynamic range + // control. Otherwise, MediaConvert ignores Dynamic range compression line. For + // information about the Dolby DRC operating modes and profiles, see the Dynamic + // Range Control chapter of the Dolby Metadata Guide at // https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf. DynamicRangeCompressionLine Eac3AtmosDynamicRangeCompressionLine // Choose the Dolby dynamic range control (DRC) profile that MediaConvert uses // when encoding the metadata in the Dolby stream for the RF operating mode. - // Default value: Film light (ATMOS_STORAGE_DDP_COMPR_FILM_LIGHT) Related setting: - // To have MediaConvert use the value you specify here, keep the default value, - // Custom (SPECIFIED) for the setting Dynamic range control (DynamicRangeControl). - // Otherwise, MediaConvert ignores Dynamic range compression RF - // (DynamicRangeCompressionRf). For information about the Dolby DRC operating modes - // and profiles, see the Dynamic Range Control chapter of the Dolby Metadata Guide - // at + // Default value: Film light Related setting: To have MediaConvert use the value + // you specify here, keep the default value, Custom for the setting Dynamic range + // control. Otherwise, MediaConvert ignores Dynamic range compression RF. 
For + // information about the Dolby DRC operating modes and profiles, see the Dynamic + // Range Control chapter of the Dolby Metadata Guide at // https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf. DynamicRangeCompressionRf Eac3AtmosDynamicRangeCompressionRf // Specify whether MediaConvert should use any dynamic range control metadata from - // your input file. Keep the default value, Custom (SPECIFIED), to provide dynamic - // range control values in your job settings. Choose Follow source - // (INITIALIZE_FROM_SOURCE) to use the metadata from your input. Related - // settings--Use these settings to specify your dynamic range control values: - // Dynamic range compression line (DynamicRangeCompressionLine) and Dynamic range - // compression RF (DynamicRangeCompressionRf). When you keep the value Custom - // (SPECIFIED) for Dynamic range control (DynamicRangeControl) and you don't - // specify values for the related settings, MediaConvert uses default values for - // those settings. + // your input file. Keep the default value, Custom, to provide dynamic range + // control values in your job settings. Choose Follow source to use the metadata + // from your input. Related settings--Use these settings to specify your dynamic + // range control values: Dynamic range compression line and Dynamic range + // compression RF. When you keep the value Custom for Dynamic range control and you + // don't specify values for the related settings, MediaConvert uses default values + // for those settings. DynamicRangeControl Eac3AtmosDynamicRangeControl // Specify a value for the following Dolby Atmos setting: Left only/Right only // center mix (Lo/Ro center). MediaConvert uses this value for downmixing. Default - // value: -3 dB (ATMOS_STORAGE_DDP_MIXLEV_MINUS_3_DB). Valid values: 3.0, 1.5, 0.0, - // -1.5, -3.0, -4.5, and -6.0. Related setting: How the service uses this value - // depends on the value that you choose for Stereo downmix - // (Eac3AtmosStereoDownmix). Related setting: To have MediaConvert use this value, - // keep the default value, Custom (SPECIFIED) for the setting Downmix control - // (DownmixControl). Otherwise, MediaConvert ignores Left only/Right only center - // (LoRoCenterMixLevel). + // value: -3 dB. Valid values: 3.0, 1.5, 0.0, -1.5, -3.0, -4.5, and -6.0. Related + // setting: How the service uses this value depends on the value that you choose + // for Stereo downmix. Related setting: To have MediaConvert use this value, keep + // the default value, Custom for the setting Downmix control. Otherwise, + // MediaConvert ignores Left only/Right only center. LoRoCenterMixLevel float64 - // Specify a value for the following Dolby Atmos setting: Left only/Right only - // (Lo/Ro surround). MediaConvert uses this value for downmixing. Default value: -3 - // dB (ATMOS_STORAGE_DDP_MIXLEV_MINUS_3_DB). Valid values: -1.5, -3.0, -4.5, -6.0, - // and -60. The value -60 mutes the channel. Related setting: How the service uses - // this value depends on the value that you choose for Stereo downmix - // (Eac3AtmosStereoDownmix). Related setting: To have MediaConvert use this value, - // keep the default value, Custom (SPECIFIED) for the setting Downmix control - // (DownmixControl). Otherwise, MediaConvert ignores Left only/Right only surround - // (LoRoSurroundMixLevel). + // Specify a value for the following Dolby Atmos setting: Left only/Right only. + // MediaConvert uses this value for downmixing. Default value: -3 dB. 
Valid values: + // -1.5, -3.0, -4.5, -6.0, and -60. The value -60 mutes the channel. Related + // setting: How the service uses this value depends on the value that you choose + // for Stereo downmix. Related setting: To have MediaConvert use this value, keep + // the default value, Custom for the setting Downmix control. Otherwise, + // MediaConvert ignores Left only/Right only surround. LoRoSurroundMixLevel float64 // Specify a value for the following Dolby Atmos setting: Left total/Right total // center mix (Lt/Rt center). MediaConvert uses this value for downmixing. Default - // value: -3 dB (ATMOS_STORAGE_DDP_MIXLEV_MINUS_3_DB) Valid values: 3.0, 1.5, 0.0, - // -1.5, -3.0, -4.5, and -6.0. Related setting: How the service uses this value - // depends on the value that you choose for Stereo downmix - // (Eac3AtmosStereoDownmix). Related setting: To have MediaConvert use this value, - // keep the default value, Custom (SPECIFIED) for the setting Downmix control - // (DownmixControl). Otherwise, MediaConvert ignores Left total/Right total center - // (LtRtCenterMixLevel). + // value: -3 dB Valid values: 3.0, 1.5, 0.0, -1.5, -3.0, -4.5, and -6.0. Related + // setting: How the service uses this value depends on the value that you choose + // for Stereo downmix. Related setting: To have MediaConvert use this value, keep + // the default value, Custom for the setting Downmix control. Otherwise, + // MediaConvert ignores Left total/Right total center. LtRtCenterMixLevel float64 // Specify a value for the following Dolby Atmos setting: Left total/Right total // surround mix (Lt/Rt surround). MediaConvert uses this value for downmixing. - // Default value: -3 dB (ATMOS_STORAGE_DDP_MIXLEV_MINUS_3_DB) Valid values: -1.5, - // -3.0, -4.5, -6.0, and -60. The value -60 mutes the channel. Related setting: How - // the service uses this value depends on the value that you choose for Stereo - // downmix (Eac3AtmosStereoDownmix). Related setting: To have MediaConvert use this - // value, keep the default value, Custom (SPECIFIED) for the setting Downmix - // control (DownmixControl). Otherwise, the service ignores Left total/Right total - // surround (LtRtSurroundMixLevel). + // Default value: -3 dB Valid values: -1.5, -3.0, -4.5, -6.0, and -60. The value + // -60 mutes the channel. Related setting: How the service uses this value depends + // on the value that you choose for Stereo downmix. Related setting: To have + // MediaConvert use this value, keep the default value, Custom for the setting + // Downmix control. Otherwise, the service ignores Left total/Right total surround. LtRtSurroundMixLevel float64 // Choose how the service meters the loudness of your audio. @@ -2746,10 +2616,9 @@ type Eac3AtmosSettings struct { SpeechThreshold int32 // Choose how the service does stereo downmixing. Default value: Not indicated - // (ATMOS_STORAGE_DDP_DMIXMOD_NOT_INDICATED) Related setting: To have MediaConvert - // use this value, keep the default value, Custom (SPECIFIED) for the setting - // Downmix control (DownmixControl). Otherwise, MediaConvert ignores Stereo downmix - // (StereoDownmix). + // Related setting: To have MediaConvert use this value, keep the default value, + // Custom for the setting Downmix control. Otherwise, MediaConvert ignores Stereo + // downmix. 
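For reference, here is a minimal Go sketch of the Downmix control relationship described in the comments above (an illustration for this review, not part of the upstream change). The field names and value types come from the Eac3AtmosSettings struct shown in this diff; the enum constant names are assumed from the SDK generator's usual SCREAMING_CASE-to-CamelCase mapping and should be verified against the module's enums.go:

package example

import "github.com/aws/aws-sdk-go-v2/service/mediaconvert/types"

// atmosDownmix keeps Downmix control at Custom (SPECIFIED), so MediaConvert
// honors the stereo downmix choice and the four explicit mix levels below
// instead of any downmix metadata carried in the input file.
func atmosDownmix() types.Eac3AtmosSettings {
	return types.Eac3AtmosSettings{
		DownmixControl:       types.Eac3AtmosDownmixControlSpecified, // assumed constant name
		StereoDownmix:        types.Eac3AtmosStereoDownmixStereo,     // assumed constant name
		LoRoCenterMixLevel:   -3.0,                                   // the documented -3 dB default
		LoRoSurroundMixLevel: -3.0,
		LtRtCenterMixLevel:   -3.0,
		LtRtSurroundMixLevel: -3.0,
	}
}

Choosing Follow source (INITIALIZE_FROM_SOURCE) instead would make MediaConvert ignore all five of these values, per the comments above.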
StereoDownmix Eac3AtmosStereoDownmix // Specify whether your input audio has an additional center rear surround channel @@ -2759,8 +2628,7 @@ type Eac3AtmosSettings struct { noSmithyDocumentSerde } -// Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to the -// value EAC3. +// Required when you set Codec to the value EAC3. type Eac3Settings struct { // If set to ATTENUATE_3_DB, applies a 3 dB attenuation to the surround channels. @@ -2793,20 +2661,18 @@ type Eac3Settings struct { // Choose the Dolby Digital dynamic range control (DRC) profile that MediaConvert // uses when encoding the metadata in the Dolby Digital stream for the line // operating mode. Related setting: When you use this setting, MediaConvert ignores - // any value you provide for Dynamic range compression profile - // (DynamicRangeCompressionProfile). For information about the Dolby Digital DRC - // operating modes and profiles, see the Dynamic Range Control chapter of the Dolby - // Metadata Guide at + // any value you provide for Dynamic range compression profile. For information + // about the Dolby Digital DRC operating modes and profiles, see the Dynamic Range + // Control chapter of the Dolby Metadata Guide at // https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf. DynamicRangeCompressionLine Eac3DynamicRangeCompressionLine // Choose the Dolby Digital dynamic range control (DRC) profile that MediaConvert // uses when encoding the metadata in the Dolby Digital stream for the RF operating // mode. Related setting: When you use this setting, MediaConvert ignores any value - // you provide for Dynamic range compression profile - // (DynamicRangeCompressionProfile). For information about the Dolby Digital DRC - // operating modes and profiles, see the Dynamic Range Control chapter of the Dolby - // Metadata Guide at + // you provide for Dynamic range compression profile. For information about the + // Dolby Digital DRC operating modes and profiles, see the Dynamic Range Control + // chapter of the Dolby Metadata Guide at // https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf. DynamicRangeCompressionRf Eac3DynamicRangeCompressionRf @@ -2818,43 +2684,39 @@ type Eac3Settings struct { LfeFilter Eac3LfeFilter // Specify a value for the following Dolby Digital Plus setting: Left only/Right - // only center mix (Lo/Ro center). MediaConvert uses this value for downmixing. How - // the service uses this value depends on the value that you choose for Stereo - // downmix (Eac3StereoDownmix). Valid values: 3.0, 1.5, 0.0, -1.5, -3.0, -4.5, - // -6.0, and -60. The value -60 mutes the channel. This setting applies only if you - // keep the default value of 3/2 - L, R, C, Ls, Rs (CODING_MODE_3_2) for the - // setting Coding mode (Eac3CodingMode). If you choose a different value for Coding - // mode, the service ignores Left only/Right only center (loRoCenterMixLevel). + // only center mix. MediaConvert uses this value for downmixing. How the service + // uses this value depends on the value that you choose for Stereo downmix. Valid + // values: 3.0, 1.5, 0.0, -1.5, -3.0, -4.5, -6.0, and -60. The value -60 mutes the + // channel. This setting applies only if you keep the default value of 3/2 - L, R, + // C, Ls, Rs for the setting Coding mode. If you choose a different value for + // Coding mode, the service ignores Left only/Right only center. 
LoRoCenterMixLevel float64 // Specify a value for the following Dolby Digital Plus setting: Left only/Right - // only (Lo/Ro surround). MediaConvert uses this value for downmixing. How the - // service uses this value depends on the value that you choose for Stereo downmix - // (Eac3StereoDownmix). Valid values: -1.5, -3.0, -4.5, -6.0, and -60. The value - // -60 mutes the channel. This setting applies only if you keep the default value - // of 3/2 - L, R, C, Ls, Rs (CODING_MODE_3_2) for the setting Coding mode - // (Eac3CodingMode). If you choose a different value for Coding mode, the service - // ignores Left only/Right only surround (loRoSurroundMixLevel). + // only. MediaConvert uses this value for downmixing. How the service uses this + // value depends on the value that you choose for Stereo downmix. Valid values: + // -1.5, -3.0, -4.5, -6.0, and -60. The value -60 mutes the channel. This setting + // applies only if you keep the default value of 3/2 - L, R, C, Ls, Rs for the + // setting Coding mode. If you choose a different value for Coding mode, the + // service ignores Left only/Right only surround. LoRoSurroundMixLevel float64 // Specify a value for the following Dolby Digital Plus setting: Left total/Right - // total center mix (Lt/Rt center). MediaConvert uses this value for downmixing. - // How the service uses this value depends on the value that you choose for Stereo - // downmix (Eac3StereoDownmix). Valid values: 3.0, 1.5, 0.0, -1.5, -3.0, -4.5, - // -6.0, and -60. The value -60 mutes the channel. This setting applies only if you - // keep the default value of 3/2 - L, R, C, Ls, Rs (CODING_MODE_3_2) for the - // setting Coding mode (Eac3CodingMode). If you choose a different value for Coding - // mode, the service ignores Left total/Right total center (ltRtCenterMixLevel). + // total center mix. MediaConvert uses this value for downmixing. How the service + // uses this value depends on the value that you choose for Stereo downmix. Valid + // values: 3.0, 1.5, 0.0, -1.5, -3.0, -4.5, -6.0, and -60. The value -60 mutes the + // channel. This setting applies only if you keep the default value of 3/2 - L, R, + // C, Ls, Rs for the setting Coding mode. If you choose a different value for + // Coding mode, the service ignores Left total/Right total center. LtRtCenterMixLevel float64 // Specify a value for the following Dolby Digital Plus setting: Left total/Right - // total surround mix (Lt/Rt surround). MediaConvert uses this value for - // downmixing. How the service uses this value depends on the value that you choose - // for Stereo downmix (Eac3StereoDownmix). Valid values: -1.5, -3.0, -4.5, -6.0, - // and -60. The value -60 mutes the channel. This setting applies only if you keep - // the default value of 3/2 - L, R, C, Ls, Rs (CODING_MODE_3_2) for the setting - // Coding mode (Eac3CodingMode). If you choose a different value for Coding mode, - // the service ignores Left total/Right total surround (ltRtSurroundMixLevel). + // total surround mix. MediaConvert uses this value for downmixing. How the service + // uses this value depends on the value that you choose for Stereo downmix. Valid + // values: -1.5, -3.0, -4.5, -6.0, and -60. The value -60 mutes the channel. This + // setting applies only if you keep the default value of 3/2 - L, R, C, Ls, Rs for + // the setting Coding mode. If you choose a different value for Coding mode, the + // service ignores Left total/Right total surround. 
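The Coding mode gating described in the Dolby Digital Plus comments above can be sketched the same way (again an illustration, not part of the change; the CodingMode field and constant names are assumptions based on the SDK's naming conventions):

package example

import "github.com/aws/aws-sdk-go-v2/service/mediaconvert/types"

// eac3Downmix keeps the default 3/2 coding mode; with any other coding mode,
// MediaConvert ignores the stereo downmix and mix-level values below.
func eac3Downmix() types.Eac3Settings {
	return types.Eac3Settings{
		CodingMode:           types.Eac3CodingModeCodingMode32, // 3/2 - L, R, C, Ls, Rs (assumed name)
		StereoDownmix:        types.Eac3StereoDownmixLtRt,      // assumed name
		LtRtCenterMixLevel:   -3.0,
		LtRtSurroundMixLevel: -3.0,
		LoRoCenterMixLevel:   -3.0,
		LoRoSurroundMixLevel: -3.0,
	}
}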
LtRtSurroundMixLevel float64 // When set to FOLLOW_INPUT, encoder metadata will be sourced from the DD, DD+, or @@ -2876,9 +2738,9 @@ type Eac3Settings struct { SampleRate int32 // Choose how the service does stereo downmixing. This setting only applies if you - // keep the default value of 3/2 - L, R, C, Ls, Rs (CODING_MODE_3_2) for the - // setting Coding mode (Eac3CodingMode). If you choose a different value for Coding - // mode, the service ignores Stereo downmix (Eac3StereoDownmix). + // keep the default value of 3/2 - L, R, C, Ls, Rs for the setting Coding mode. If + // you choose a different value for Coding mode, the service ignores Stereo + // downmix. StereoDownmix Eac3StereoDownmix // When encoding 3/2 audio, sets whether an extra center back surround channel is @@ -2896,9 +2758,6 @@ type Eac3Settings struct { // ancillary) captions. Set up embedded captions in the same output as your video. // For more information, see // https://docs.aws.amazon.com/mediaconvert/latest/ug/embedded-output-captions.html. -// When you work directly in your JSON job specification, include this object and -// any required children when you set destinationType to EMBEDDED, -// EMBEDDED_PLUS_SCTE20, or SCTE20_PLUS_EMBEDDED. type EmbeddedDestinationSettings struct { // Ignore this setting unless your input captions are SCC format and your output @@ -2911,11 +2770,10 @@ type EmbeddedDestinationSettings struct { // Ignore this setting unless your input captions are SCC format and you want both // 608 and 708 captions embedded in your output stream. Optionally, specify the 708 // service number for each output captions channel. Choose a different number for - // each channel. To use this setting, also set Force 608 to 708 upconvert - // (Convert608To708) to Upconvert (UPCONVERT) in your input captions selector - // settings. If you choose to upconvert but don't specify a 708 service number, - // MediaConvert uses the number that you specify for CC channel number - // (destination608ChannelNumber) for the 708 service number. For more information, + // each channel. To use this setting, also set Force 608 to 708 upconvert to + // Upconvert in your input captions selector settings. If you choose to upconvert + // but don't specify a 708 service number, MediaConvert uses the number that you + // specify for CC channel number for the 708 service number. For more information, // see https://docs.aws.amazon.com/console/mediaconvert/dual-scc-to-embedded. Destination708ServiceNumber int32 @@ -2926,10 +2784,9 @@ type EmbeddedDestinationSettings struct { type EmbeddedSourceSettings struct { // Specify whether this set of input captions appears in your outputs in both 608 - // and 708 format. If you choose Upconvert (UPCONVERT), MediaConvert includes the - // captions data in two ways: it passes the 608 data through using the 608 - // compatibility bytes fields of the 708 wrapper, and it also translates the 608 - // data into 708. + // and 708 format. If you choose Upconvert, MediaConvert includes the captions data + // in two ways: it passes the 608 data through using the 608 compatibility bytes + // fields of the 708 wrapper, and it also translates the 608 data into 708. Convert608To708 EmbeddedConvert608To708 // Specifies the 608/708 channel number within the video track from which to @@ -2975,7 +2832,7 @@ type EsamSettings struct { // Specifies an ESAM ManifestConfirmConditionNotification XML as per // OC-SP-ESAM-API-I03-131025. 
The transcoder uses the manifest conditioning - // instructions that you provide in the setting MCC XML (mccXml). + // instructions that you provide in the setting MCC XML. ManifestConfirmConditionNotification *EsamManifestConfirmConditionNotification // Specifies the stream distance, in milliseconds, between the SCTE 35 messages @@ -2986,7 +2843,7 @@ type EsamSettings struct { // Specifies an ESAM SignalProcessingNotification XML as per // OC-SP-ESAM-API-I03-131025. The transcoder uses the signal processing - // instructions that you provide in the setting SCC XML (sccXml). + // instructions that you provide in the setting SCC XML. SignalProcessingNotification *EsamSignalProcessingNotification noSmithyDocumentSerde @@ -2998,11 +2855,10 @@ type EsamSignalProcessingNotification struct { // Provide your ESAM SignalProcessingNotification XML document inside your JSON // job settings. Form the XML document as per OC-SP-ESAM-API-I03-131025. The // transcoder will use the signal processing instructions in the message that you - // supply. Provide your ESAM SignalProcessingNotification XML document inside your - // JSON job settings. For your MPEG2-TS file outputs, if you want the service to - // place SCTE-35 markers at the insertion points you specify in the XML document, - // you must also enable SCTE-35 ESAM (scte35Esam). Note that you can either specify - // an ESAM XML document or enable SCTE-35 passthrough. You can't do both. + // supply. For your MPEG2-TS file outputs, if you want the service to place SCTE-35 + // markers at the insertion points you specify in the XML document, you must also + // enable SCTE-35 ESAM. Note that you can either specify an ESAM XML document or + // enable SCTE-35 passthrough. You can't do both. SccXml *string noSmithyDocumentSerde @@ -3041,16 +2897,12 @@ type F4vSettings struct { // Settings related to your File output group. MediaConvert uses this group of // settings to generate a single standalone file, rather than a streaming package. -// When you work directly in your JSON job specification, include this object and -// any required children when you set Type, under OutputGroupSettings, to -// FILE_GROUP_SETTINGS. type FileGroupSettings struct { - // Use Destination (Destination) to specify the S3 output location and the output - // filename base. Destination accepts format identifiers. If you do not specify the - // base filename in the URI, the service will use the filename of the input file. - // If your job has multiple inputs, the service uses the filename of the first - // input file. + // Use Destination to specify the S3 output location and the output filename base. + // Destination accepts format identifiers. If you do not specify the base filename + // in the URI, the service will use the filename of the input file. If your job has + // multiple inputs, the service uses the filename of the first input file. Destination *string // Settings associated with the destination. Will vary based on the type of @@ -3067,10 +2919,9 @@ type FileGroupSettings struct { type FileSourceSettings struct { // Specify whether this set of input captions appears in your outputs in both 608 - // and 708 format. If you choose Upconvert (UPCONVERT), MediaConvert includes the - // captions data in two ways: it passes the 608 data through using the 608 - // compatibility bytes fields of the 708 wrapper, and it also translates the 608 - // data into 708. + // and 708 format. 
If you choose Upconvert, MediaConvert includes the captions data + // in two ways: it passes the 608 data through using the 608 compatibility bytes + // fields of the 708 wrapper, and it also translates the 608 data into 708. Convert608To708 FileSourceConvert608To708 // Choose the presentation style of your input SCC captions. To use the same @@ -3082,10 +2933,8 @@ type FileSourceSettings struct { // Ignore this setting unless your input captions format is SCC. To have the // service compensate for differing frame rates between your input captions and // input video, specify the frame rate of the captions file. Specify this value as - // a fraction. When you work directly in your JSON job specification, use the - // settings framerateNumerator and framerateDenominator. For example, you might - // specify 24 / 1 for 24 fps, 25 / 1 for 25 fps, 24000 / 1001 for 23.976 fps, or - // 30000 / 1001 for 29.97 fps. + // a fraction. For example, you might specify 24 / 1 for 24 fps, 25 / 1 for 25 fps, + // 24000 / 1001 for 23.976 fps, or 30000 / 1001 for 29.97 fps. Framerate *CaptionSourceFramerate // External caption file used for loading captions. Accepted file extensions are @@ -3099,18 +2948,18 @@ type FileSourceSettings struct { // For example, type 15 to add 15 seconds to all the times in the captions file. // Type -5 to subtract 5 seconds from the times in the captions file. You can // optionally specify your time delta in milliseconds instead of seconds. When you - // do so, set the related setting, Time delta units (TimeDeltaUnits) to - // Milliseconds (MILLISECONDS). Note that, when you specify a time delta for - // timecode-based caption sources, such as SCC and STL, and your time delta isn't a - // multiple of the input frame rate, MediaConvert snaps the captions to the nearest - // frame. For example, when your input video frame rate is 25 fps and you specify - // 1010ms for time delta, MediaConvert delays your captions by 1000 ms. + // do so, set the related setting, Time delta units to Milliseconds. Note that, + // when you specify a time delta for timecode-based caption sources, such as SCC + // and STL, and your time delta isn't a multiple of the input frame rate, + // MediaConvert snaps the captions to the nearest frame. For example, when your + // input video frame rate is 25 fps and you specify 1010ms for time delta, + // MediaConvert delays your captions by 1000 ms. TimeDelta int32 - // When you use the setting Time delta (TimeDelta) to adjust the sync between your - // sidecar captions and your video, use this setting to specify the units for the - // delta that you specify. When you don't specify a value for Time delta units - // (TimeDeltaUnits), MediaConvert uses seconds by default. + // When you use the setting Time delta to adjust the sync between your sidecar + // captions and your video, use this setting to specify the units for the delta + // that you specify. When you don't specify a value for Time delta units, + // MediaConvert uses seconds by default. TimeDeltaUnits FileSourceTimeDeltaUnits noSmithyDocumentSerde @@ -3138,8 +2987,7 @@ type ForceIncludeRenditionSize struct { noSmithyDocumentSerde } -// Required when you set (Codec) under (VideoDescription)>(CodecSettings) to the -// value FRAME_CAPTURE. +// Required when you set Codec to the value FRAME_CAPTURE. 
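Before the frame-capture settings: the FileSourceSettings caption options above combine as in this hedged sketch (illustrative only, not part of the upstream change; the SourceFile field name and the enum constant names are assumptions to verify, and the S3 URI is a placeholder):

package example

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/mediaconvert/types"
)

// sccCaptions loads a sidecar SCC file, upconverts 608 captions to 708,
// declares the caption frame rate 23.976 as the fraction 24000/1001, and
// shifts caption timing by +1010 ms using Time delta in milliseconds.
func sccCaptions() types.FileSourceSettings {
	return types.FileSourceSettings{
		SourceFile:      aws.String("s3://amzn-s3-demo-bucket/captions.scc"), // placeholder URI
		Convert608To708: types.FileSourceConvert608To708Upconvert,
		Framerate: &types.CaptionSourceFramerate{
			FramerateNumerator:   24000,
			FramerateDenominator: 1001,
		},
		TimeDelta:      1010,
		TimeDeltaUnits: types.FileSourceTimeDeltaUnitsMilliseconds, // assumed constant name
	}
}

Per the snapping rule described above, with 25 fps input video this 1010 ms delta isn't a multiple of the frame duration, so the captions would actually shift by 1000 ms.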
type FrameCaptureSettings struct { // Frame capture will encode the first frame of the output stream, then one frame @@ -3167,8 +3015,7 @@ type FrameCaptureSettings struct { } // Settings for quality-defined variable bitrate encoding with the H.264 codec. -// Use these settings only when you set QVBR for Rate control mode -// (RateControlMode). +// Use these settings only when you set QVBR for Rate control mode. type H264QvbrSettings struct { // Use this setting only when Rate control mode is QVBR and Quality tuning level @@ -3179,18 +3026,18 @@ type H264QvbrSettings struct { // seconds of encoded output. MaxAverageBitrate int32 - // Use this setting only when you set Rate control mode (RateControlMode) to QVBR. - // Specify the target quality level for this output. MediaConvert determines the - // right number of bits to use for each part of the video to maintain the video - // quality that you specify. When you keep the default value, AUTO, MediaConvert - // picks a quality level for you, based on characteristics of your input video. If - // you prefer to specify a quality level, specify a number from 1 through 10. Use - // higher numbers for greater quality. Level 10 results in nearly lossless - // compression. The quality level for most broadcast-quality transcodes is between - // 6 and 9. Optionally, to specify a value between whole numbers, also provide a - // value for the setting qvbrQualityLevelFineTune. For example, if you want your - // QVBR quality level to be 7.33, set qvbrQualityLevel to 7 and set - // qvbrQualityLevelFineTune to .33. + // Use this setting only when you set Rate control mode to QVBR. Specify the + // target quality level for this output. MediaConvert determines the right number + // of bits to use for each part of the video to maintain the video quality that you + // specify. When you keep the default value, AUTO, MediaConvert picks a quality + // level for you, based on characteristics of your input video. If you prefer to + // specify a quality level, specify a number from 1 through 10. Use higher numbers + // for greater quality. Level 10 results in nearly lossless compression. The + // quality level for most broadcast-quality transcodes is between 6 and 9. + // Optionally, to specify a value between whole numbers, also provide a value for + // the setting qvbrQualityLevelFineTune. For example, if you want your QVBR quality + // level to be 7.33, set qvbrQualityLevel to 7 and set qvbrQualityLevelFineTune to + // .33. QvbrQualityLevel int32 // Optional. Specify a value here to set the QVBR quality to a level that is @@ -3204,20 +3051,18 @@ type H264QvbrSettings struct { noSmithyDocumentSerde } -// Required when you set (Codec) under (VideoDescription)>(CodecSettings) to the -// value H_264. +// Required when you set Codec to the value H_264. type H264Settings struct { - // Keep the default value, Auto (AUTO), for this setting to have MediaConvert + // Keep the default value, Auto, for this setting to have MediaConvert // automatically apply the best types of quantization for your video content. When // you want to apply your quantization settings manually, you must set - // H264AdaptiveQuantization to a value other than Auto (AUTO). Use this setting to - // specify the strength of any adaptive quantization filters that you enable. If - // you don't want MediaConvert to do any adaptive quantization in this transcode, - // set Adaptive quantization (H264AdaptiveQuantization) to Off (OFF). 
Related - // settings: The value that you choose here applies to the following settings: - // H264FlickerAdaptiveQuantization, H264SpatialAdaptiveQuantization, and - // H264TemporalAdaptiveQuantization. + // H264AdaptiveQuantization to a value other than Auto. Use this setting to specify + // the strength of any adaptive quantization filters that you enable. If you don't + // want MediaConvert to do any adaptive quantization in this transcode, set + // Adaptive quantization to Off. Related settings: The value that you choose here + // applies to the following settings: H264FlickerAdaptiveQuantization, + // H264SpatialAdaptiveQuantization, and H264TemporalAdaptiveQuantization. AdaptiveQuantization H264AdaptiveQuantization // The Bandwidth reduction filter increases the video quality of your output @@ -3236,7 +3081,7 @@ type H264Settings struct { Bitrate int32 // Specify an H.264 level that is consistent with your output video settings. If - // you aren't sure what level to specify, choose Auto (AUTO). + // you aren't sure what level to specify, choose Auto. CodecLevel H264CodecLevel // H.264 Profile. High 4:2:2 and 10-bit profiles are only available with the AVC-I @@ -3257,9 +3102,9 @@ type H264Settings struct { // The video encoding method for your MPEG-4 AVC output. Keep the default value, // PAFF, to have MediaConvert use PAFF encoding for interlaced outputs. Choose - // Force field (FORCE_FIELD) to disable PAFF encoding and create separate - // interlaced fields. Choose MBAFF to disable PAFF and have MediaConvert use MBAFF - // encoding for interlaced outputs. + // Force field to disable PAFF encoding and create separate interlaced fields. + // Choose MBAFF to disable PAFF and have MediaConvert use MBAFF encoding for + // interlaced outputs. FieldEncoding H264FieldEncoding // Only use this setting when you change the default value, AUTO, for the setting @@ -3267,14 +3112,14 @@ type H264Settings struct { // H264AdaptiveQuantization and all other adaptive quantization from your JSON job // specification, MediaConvert automatically applies the best types of quantization // for your video content. When you set H264AdaptiveQuantization to a value other - // than AUTO, the default value for H264FlickerAdaptiveQuantization is Disabled - // (DISABLED). Change this value to Enabled (ENABLED) to reduce I-frame pop. - // I-frame pop appears as a visual flicker that can arise when the encoder saves - // bits by copying some macroblocks many times from frame to frame, and then - // refreshes them at the I-frame. When you enable this setting, the encoder updates - // these macroblocks slightly more often to smooth out the flicker. To manually - // enable or disable H264FlickerAdaptiveQuantization, you must set Adaptive - // quantization (H264AdaptiveQuantization) to a value other than AUTO. + // than AUTO, the default value for H264FlickerAdaptiveQuantization is Disabled. + // Change this value to Enabled to reduce I-frame pop. I-frame pop appears as a + // visual flicker that can arise when the encoder saves bits by copying some + // macroblocks many times from frame to frame, and then refreshes them at the + // I-frame. When you enable this setting, the encoder updates these macroblocks + // slightly more often to smooth out the flicker. To manually enable or disable + // H264FlickerAdaptiveQuantization, you must set Adaptive quantization to a value + // other than AUTO. 
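As a quick illustration of that precondition (not part of the upstream change; constant names assumed from the SDK's usual naming):

package example

import "github.com/aws/aws-sdk-go-v2/service/mediaconvert/types"

// manualAq moves Adaptive quantization off Auto, which is what allows the
// flicker, spatial, and temporal filters to be set explicitly at all.
func manualAq() types.H264Settings {
	return types.H264Settings{
		AdaptiveQuantization:         types.H264AdaptiveQuantizationHigh,
		FlickerAdaptiveQuantization:  types.H264FlickerAdaptiveQuantizationEnabled,
		SpatialAdaptiveQuantization:  types.H264SpatialAdaptiveQuantizationEnabled,
		TemporalAdaptiveQuantization: types.H264TemporalAdaptiveQuantizationEnabled,
	}
}

Leaving AdaptiveQuantization at its Auto default would cause the three filter settings to be ignored, per the comments above.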
FlickerAdaptiveQuantization H264FlickerAdaptiveQuantization // If you are using the console, use the Framerate setting to specify the frame @@ -3282,12 +3127,7 @@ type H264Settings struct { // video, choose Follow source. If you want to do frame rate conversion, choose a // frame rate from the dropdown list or choose Custom. The framerates shown in the // dropdown list are decimal approximations of fractions. If you choose Custom, - // specify your frame rate as a fraction. If you are creating your transcoding job - // specification as a JSON file without the console, use FramerateControl to - // specify which value the service uses for the frame rate for this output. Choose - // INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the - // input. Choose SPECIFIED if you want the service to use the frame rate you - // specify in the settings FramerateNumerator and FramerateDenominator. + // specify your frame rate as a fraction. FramerateControl H264FramerateControl // Choose the method that you want MediaConvert to use when increasing or @@ -3329,34 +3169,30 @@ type H264Settings struct { // Specify the relative frequency of open to closed GOPs in this output. For // example, if you want to allow four open GOPs and then require a closed GOP, set // this value to 5. We recommend that you have the transcoder automatically choose - // this value for you based on characteristics of your input video. To enable this - // automatic behavior, keep the default value by leaving this setting out of your - // JSON job specification. In the console, do this by keeping the default empty - // value. If you do explicitly specify a value, for segmented outputs, don't set - // this value to 0. + // this value for you based on characteristics of your input video. In the console, + // do this by keeping the default empty value. If you do explicitly specify a + // value, for segmented outputs, don't set this value to 0. GopClosedCadence int32 - // Use this setting only when you set GOP mode control (GopSizeUnits) to - // Specified, frames (FRAMES) or Specified, seconds (SECONDS). Specify the GOP - // length using a whole number of frames or a decimal value of seconds. - // MediaConvert will interpret this value as frames or seconds depending on the - // value you choose for GOP mode control (GopSizeUnits). If you want to allow - // MediaConvert to automatically determine GOP size, leave GOP size blank and set - // GOP mode control to Auto (AUTO). If your output group specifies HLS, DASH, or - // CMAF, leave GOP size blank and set GOP mode control to Auto in each output in - // your output group. + // Use this setting only when you set GOP mode control to Specified, frames or + // Specified, seconds. Specify the GOP length using a whole number of frames or a + // decimal value of seconds. MediaConvert will interpret this value as frames or + // seconds depending on the value you choose for GOP mode control. If you want to + // allow MediaConvert to automatically determine GOP size, leave GOP size blank and + // set GOP mode control to Auto. If your output group specifies HLS, DASH, or CMAF, + // leave GOP size blank and set GOP mode control to Auto in each output in your + // output group. GopSize float64 // Specify how the transcoder determines GOP size for this output. We recommend // that you have the transcoder automatically choose this value for you based on // characteristics of your input video. 
To enable this automatic behavior, choose - // Auto (AUTO) and and leave GOP size (GopSize) blank. By default, if you don't - // specify GOP mode control (GopSizeUnits), MediaConvert will use automatic - // behavior. If your output group specifies HLS, DASH, or CMAF, set GOP mode - // control to Auto and leave GOP size blank in each output in your output group. To - // explicitly specify the GOP length, choose Specified, frames (FRAMES) or - // Specified, seconds (SECONDS) and then provide the GOP length in the related - // setting GOP size (GopSize). + // Auto and leave GOP size blank. By default, if you don't specify GOP mode + // control, MediaConvert will use automatic behavior. If your output group + // specifies HLS, DASH, or CMAF, set GOP mode control to Auto and leave GOP size + // blank in each output in your output group. To explicitly specify the GOP length, + // choose Specified, frames or Specified, seconds and then provide the GOP length + // in the related setting GOP size. GopSizeUnits H264GopSizeUnits // If your downstream systems have strict buffer requirements: Specify the minimum @@ -3373,39 +3209,35 @@ type H264Settings struct { HrdBufferSize int32 // Choose the scan line type for the output. Keep the default value, Progressive - // (PROGRESSIVE) to create a progressive output, regardless of the scan type of - // your input. Use Top field first (TOP_FIELD) or Bottom field first (BOTTOM_FIELD) - // to create an output that's interlaced with the same field polarity throughout. - // Use Follow, default top (FOLLOW_TOP_FIELD) or Follow, default bottom - // (FOLLOW_BOTTOM_FIELD) to produce outputs with the same field polarity as the - // source. For jobs that have multiple inputs, the output field polarity might - // change over the course of the output. Follow behavior depends on the input scan - // type. If the source is interlaced, the output will be interlaced with the same - // polarity as the source. If the source is progressive, the output will be - // interlaced with top field bottom field first, depending on which of the Follow - // options you choose. + // to create a progressive output, regardless of the scan type of your input. Use + // Top field first or Bottom field first to create an output that's interlaced with + // the same field polarity throughout. Use Follow, default top or Follow, default + // bottom to produce outputs with the same field polarity as the source. For jobs + // that have multiple inputs, the output field polarity might change over the + // course of the output. Follow behavior depends on the input scan type. If the + // source is interlaced, the output will be interlaced with the same polarity as + // the source. If the source is progressive, the output will be interlaced with top + // field bottom field first, depending on which of the Follow options you choose. InterlaceMode H264InterlaceMode // Maximum bitrate in bits/second. For example, enter five megabits per second as // 5000000. Required when Rate control mode is QVBR. MaxBitrate int32 - // Use this setting only when you also enable Scene change detection - // (SceneChangeDetect). This setting determines how the encoder manages the spacing - // between I-frames that it inserts as part of the I-frame cadence and the I-frames - // that it inserts for Scene change detection. We recommend that you have the - // transcoder automatically choose this value for you based on characteristics of - // your input video.
To enable this automatic behavior, keep the default value by - // leaving this setting out of your JSON job specification. In the console, do this - // by keeping the default empty value. When you explicitly specify a value for this - // setting, the encoder determines whether to skip a cadence-driven I-frame by the - // value you set. For example, if you set Min I interval (minIInterval) to 5 and a - // cadence-driven I-frame would fall within 5 frames of a scene-change I-frame, - // then the encoder skips the cadence-driven I-frame. In this way, one GOP is - // shrunk slightly and one GOP is stretched slightly. When the cadence-driven - // I-frames are farther from the scene-change I-frame than the value you set, then - // the encoder leaves all I-frames in place and the GOPs surrounding the scene - // change are smaller than the usual cadence GOPs. + // Use this setting only when you also enable Scene change detection. This setting + // determines how the encoder manages the spacing between I-frames that it inserts + // as part of the I-frame cadence and the I-frames that it inserts for Scene change + // detection. We recommend that you have the transcoder automatically choose this + // value for you based on characteristics of your input video. To enable this + // automatic behavior, keep the default empty value. When you + // explicitly specify a value for this setting, the encoder determines whether to + // skip a cadence-driven I-frame by the value you set. For example, if you set Min + // I interval to 5 and a cadence-driven I-frame would fall within 5 frames of a + // scene-change I-frame, then the encoder skips the cadence-driven I-frame. In this + // way, one GOP is shrunk slightly and one GOP is stretched slightly. When the + // cadence-driven I-frames are farther from the scene-change I-frame than the value + // you set, then the encoder leaves all I-frames in place and the GOPs surrounding + // the scene change are smaller than the usual cadence GOPs. MinIInterval int32 // Specify the number of B-frames between reference frames in this output. For the @@ -3420,28 +3252,24 @@ type H264Settings struct { NumberReferenceFrames int32 // Optional. Specify how the service determines the pixel aspect ratio (PAR) for - // this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE), uses - // the PAR from your input video for your output. To specify a different PAR in the - // console, choose any value other than Follow source. To specify a different PAR - // by editing the JSON job specification, choose SPECIFIED. When you choose - // SPECIFIED for this setting, you must also specify values for the parNumerator - // and parDenominator settings. + // this output. The default behavior, Follow source, uses the PAR from your input + // video for your output. To specify a different PAR in the console, choose any + // value other than Follow source. When you choose SPECIFIED for this setting, you + // must also specify values for the parNumerator and parDenominator settings. ParControl H264ParControl - // Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the - // console, this corresponds to any value other than Follow source. When you - // specify an output pixel aspect ratio (PAR) that is different from your input - // video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC - // widescreen, you would specify the ratio 40:33. In this example, the value for - // parDenominator is 33.
+ // Required when you set Pixel aspect ratio to SPECIFIED. On the console, this + // corresponds to any value other than Follow source. When you specify an output + // pixel aspect ratio (PAR) that is different from your input video PAR, provide + // your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would + // specify the ratio 40:33. In this example, the value for parDenominator is 33. ParDenominator int32 - // Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the - // console, this corresponds to any value other than Follow source. When you - // specify an output pixel aspect ratio (PAR) that is different from your input - // video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC - // widescreen, you would specify the ratio 40:33. In this example, the value for - // parNumerator is 40. + // Required when you set Pixel aspect ratio to SPECIFIED. On the console, this + // corresponds to any value other than Follow source. When you specify an output + // pixel aspect ratio (PAR) that is different from your input video PAR, provide + // your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would + // specify the ratio 40:33. In this example, the value for parNumerator is 40. ParNumerator int32 // The Quality tuning level you choose represents a trade-off between the encoding @@ -3454,8 +3282,7 @@ type H264Settings struct { QualityTuningLevel H264QualityTuningLevel // Settings for quality-defined variable bitrate encoding with the H.264 codec. - // Use these settings only when you set QVBR for Rate control mode - // (RateControlMode). + // Use these settings only when you set QVBR for Rate control mode. QvbrSettings *H264QvbrSettings // Use this setting to specify whether this output has a variable bitrate (VBR), @@ -3466,24 +3293,23 @@ type H264Settings struct { RepeatPps H264RepeatPps // Use this setting for interlaced outputs, when your output frame rate is half of - // your input frame rate. In this situation, choose Optimized interlacing - // (INTERLACED_OPTIMIZE) to create a better quality interlaced output. In this - // case, each progressive frame from the input corresponds to an interlaced field - // in the output. Keep the default value, Basic interlacing (INTERLACED), for all - // other output frame rates. With basic interlacing, MediaConvert performs any - // frame rate conversion first and then interlaces the frames. + // your input frame rate. In this situation, choose Optimized interlacing to create + // a better quality interlaced output. In this case, each progressive frame from + // the input corresponds to an interlaced field in the output. Keep the default + // value, Basic interlacing, for all other output frame rates. With basic + // interlacing, MediaConvert performs any frame rate conversion first and then + // interlaces the frames.
When you choose Optimized interlacing and you set your + // output frame rate to a value that isn't suitable for optimized interlacing, + // MediaConvert automatically falls back to basic interlacing. Required settings: + // To use optimized interlacing, you must set Telecine to None or Soft. You can't + // use optimized interlacing for hard telecine outputs. You must also set Interlace + // mode to a value other than Progressive. ScanTypeConversionMode H264ScanTypeConversionMode // Enable this setting to insert I-frames at scene changes that the service // automatically detects. This improves video quality and is enabled by default. If - // this output uses QVBR, choose Transition detection (TRANSITION_DETECTION) for - // further video quality improvement. For more information about QVBR, see + // this output uses QVBR, choose Transition detection for further video quality + // improvement. For more information about QVBR, see // https://docs.aws.amazon.com/console/mediaconvert/cbr-vbr-qvbr. SceneChangeDetect H264SceneChangeDetect @@ -3497,32 +3323,30 @@ type H264Settings struct { // PAL, MediaConvert relabels the video frames to 25 fps and resamples your audio // to keep it synchronized with the video. Note that enabling this setting will // slightly reduce the duration of your video. Required settings: You must also set - // Framerate to 25. In your JSON job specification, set (framerateControl) to - // (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to 1. + // Framerate to 25. SlowPal H264SlowPal // Ignore this setting unless you need to comply with a specification that // requires a specific value. If you don't have a specification requirement, we // recommend that you adjust the softness of your output by using a lower value for - // the setting Sharpness (sharpness) or by enabling a noise reducer filter - // (noiseReducerFilter). The Softness (softness) setting specifies the quantization - // matrices that the encoder uses. Keep the default value, 0, for flat - // quantization. Choose the value 1 or 16 to use the default JVT softening - // quantization matricies from the H.264 specification. Choose a value from 17 to - // 128 to use planar interpolation. Increasing values from 17 to 128 result in - // increasing reduction of high-frequency data. The value 128 results in the - // softest video. + // the setting Sharpness or by enabling a noise reducer filter. The Softness + // setting specifies the quantization matrices that the encoder uses. Keep the + // default value, 0, for flat quantization. Choose the value 1 or 16 to use the + // default JVT softening quantization matrices from the H.264 specification. + // Choose a value from 17 to 128 to use planar interpolation. Increasing values + // from 17 to 128 result in increasing reduction of high-frequency data. The value + // 128 results in the softest video. Softness int32 - // Only use this setting when you change the default value, Auto (AUTO), for the - // setting H264AdaptiveQuantization. When you keep all defaults, excluding + // Only use this setting when you change the default value, Auto, for the setting + // H264AdaptiveQuantization. When you keep all defaults, excluding // H264AdaptiveQuantization and all other adaptive quantization from your JSON job // specification, MediaConvert automatically applies the best types of quantization // for your video content. When you set H264AdaptiveQuantization to a value other
When you set H264AdaptiveQuantization to a value other - // than AUTO, the default value for H264SpatialAdaptiveQuantization is Enabled - // (ENABLED). Keep this default value to adjust quantization within each frame - // based on spatial variation of content complexity. When you enable this feature, - // the encoder uses fewer bits on areas that can sustain more distortion with no + // than AUTO, the default value for H264SpatialAdaptiveQuantization is Enabled. + // Keep this default value to adjust quantization within each frame based on + // spatial variation of content complexity. When you enable this feature, the + // encoder uses fewer bits on areas that can sustain more distortion with no // noticeable visual degradation and uses more bits on areas where any small // distortion will be noticeable. For example, complex textured blocks are encoded // with fewer bits and smooth textured blocks are encoded with more bits. Enabling @@ -3530,13 +3354,12 @@ type H264Settings struct { // this feature doesn't take into account where the viewer's attention is likely to // be. If viewers are likely to be focusing their attention on a part of the screen // with a lot of complex texture, you might choose to set - // H264SpatialAdaptiveQuantization to Disabled (DISABLED). Related setting: When - // you enable spatial adaptive quantization, set the value for Adaptive - // quantization (H264AdaptiveQuantization) depending on your content. For - // homogeneous content, such as cartoons and video games, set it to Low. For - // content with a wider variety of textures, set it to High or Higher. To manually - // enable or disable H264SpatialAdaptiveQuantization, you must set Adaptive - // quantization (H264AdaptiveQuantization) to a value other than AUTO. + // H264SpatialAdaptiveQuantization to Disabled. Related setting: When you enable + // spatial adaptive quantization, set the value for Adaptive quantization depending + // on your content. For homogeneous content, such as cartoons and video games, set + // it to Low. For content with a wider variety of textures, set it to High or + // Higher. To manually enable or disable H264SpatialAdaptiveQuantization, you must + // set Adaptive quantization to a value other than AUTO. SpatialAdaptiveQuantization H264SpatialAdaptiveQuantization // Produces a bitstream compliant with SMPTE RP-2027. @@ -3544,12 +3367,11 @@ type H264Settings struct { // When you do frame rate conversion from 23.976 frames per second (fps) to 29.97 // fps, and your output scan type is interlaced, you can optionally enable hard or - // soft telecine to create a smoother picture. Hard telecine (HARD) produces a - // 29.97i output. Soft telecine (SOFT) produces an output with a 23.976 output that - // signals to the video player device to do the conversion during play back. When - // you keep the default value, None (NONE), MediaConvert does a standard frame rate - // conversion to 29.97 without doing anything with the field polarity to create a - // smoother picture. + // soft telecine to create a smoother picture. Hard telecine produces a 29.97i + // output. Soft telecine produces an output with a 23.976 output that signals to + // the video player device to do the conversion during play back. When you keep the + // default value, None, MediaConvert does a standard frame rate conversion to 29.97 + // without doing anything with the field polarity to create a smoother picture. 
Telecine H264Telecine // Only use this setting when you change the default value, AUTO, for the setting @@ -3557,22 +3379,22 @@ type H264Settings struct { // H264AdaptiveQuantization and all other adaptive quantization from your JSON job // specification, MediaConvert automatically applies the best types of quantization // for your video content. When you set H264AdaptiveQuantization to a value other - // than AUTO, the default value for H264TemporalAdaptiveQuantization is Enabled - // (ENABLED). Keep this default value to adjust quantization within each frame - // based on temporal variation of content complexity. When you enable this feature, - // the encoder uses fewer bits on areas of the frame that aren't moving and uses - // more bits on complex objects with sharp edges that move a lot. For example, this + // than AUTO, the default value for H264TemporalAdaptiveQuantization is Enabled. + // Keep this default value to adjust quantization within each frame based on + // temporal variation of content complexity. When you enable this feature, the + // encoder uses fewer bits on areas of the frame that aren't moving and uses more + // bits on complex objects with sharp edges that move a lot. For example, this // feature improves the readability of text tickers on newscasts and scoreboards on // sports matches. Enabling this feature will almost always improve your video // quality. Note, though, that this feature doesn't take into account where the // viewer's attention is likely to be. If viewers are likely to be focusing their // attention on a part of the screen that doesn't have moving objects with sharp // edges, such as sports athletes' faces, you might choose to set - // H264TemporalAdaptiveQuantization to Disabled (DISABLED). Related setting: When - // you enable temporal quantization, adjust the strength of the filter with the - // setting Adaptive quantization (adaptiveQuantization). To manually enable or - // disable H264TemporalAdaptiveQuantization, you must set Adaptive quantization - // (H264AdaptiveQuantization) to a value other than AUTO. + // H264TemporalAdaptiveQuantization to Disabled. Related setting: When you enable + // temporal quantization, adjust the strength of the filter with the setting + // Adaptive quantization. To manually enable or disable + // H264TemporalAdaptiveQuantization, you must set Adaptive quantization to a value + // other than AUTO. TemporalAdaptiveQuantization H264TemporalAdaptiveQuantization // Inserts timecode for each frame as 4 bytes of an unregistered SEI message. @@ -3582,8 +3404,7 @@ type H264Settings struct { } // Settings for quality-defined variable bitrate encoding with the H.265 codec. -// Use these settings only when you set QVBR for Rate control mode -// (RateControlMode). +// Use these settings only when you set QVBR for Rate control mode. type H265QvbrSettings struct { // Use this setting only when Rate control mode is QVBR and Quality tuning level @@ -3594,18 +3415,18 @@ type H265QvbrSettings struct { // seconds of encoded output. MaxAverageBitrate int32 - // Use this setting only when you set Rate control mode (RateControlMode) to QVBR. - // Specify the target quality level for this output. MediaConvert determines the - // right number of bits to use for each part of the video to maintain the video - // quality that you specify. When you keep the default value, AUTO, MediaConvert - // picks a quality level for you, based on characteristics of your input video. 
If - // you prefer to specify a quality level, specify a number from 1 through 10. Use - // higher numbers for greater quality. Level 10 results in nearly lossless - // compression. The quality level for most broadcast-quality transcodes is between - // 6 and 9. Optionally, to specify a value between whole numbers, also provide a - // value for the setting qvbrQualityLevelFineTune. For example, if you want your - // QVBR quality level to be 7.33, set qvbrQualityLevel to 7 and set - // qvbrQualityLevelFineTune to .33. + // Use this setting only when you set Rate control mode to QVBR. Specify the + // target quality level for this output. MediaConvert determines the right number + // of bits to use for each part of the video to maintain the video quality that you + // specify. When you keep the default value, AUTO, MediaConvert picks a quality + // level for you, based on characteristics of your input video. If you prefer to + // specify a quality level, specify a number from 1 through 10. Use higher numbers + // for greater quality. Level 10 results in nearly lossless compression. The + // quality level for most broadcast-quality transcodes is between 6 and 9. + // Optionally, to specify a value between whole numbers, also provide a value for + // the setting qvbrQualityLevelFineTune. For example, if you want your QVBR quality + // level to be 7.33, set qvbrQualityLevel to 7 and set qvbrQualityLevelFineTune to + // .33. QvbrQualityLevel int32 // Optional. Specify a value here to set the QVBR quality to a level that is @@ -3622,16 +3443,13 @@ type H265QvbrSettings struct { // Settings for H265 codec type H265Settings struct { - // When you set Adaptive Quantization (H265AdaptiveQuantization) to Auto (AUTO), - // or leave blank, MediaConvert automatically applies quantization to improve the - // video quality of your output. Set Adaptive Quantization to Low (LOW), Medium - // (MEDIUM), High (HIGH), Higher (HIGHER), or Max (MAX) to manually control the - // strength of the quantization filter. When you do, you can specify a value for - // Spatial Adaptive Quantization (H265SpatialAdaptiveQuantization), Temporal - // Adaptive Quantization (H265TemporalAdaptiveQuantization), and Flicker Adaptive - // Quantization (H265FlickerAdaptiveQuantization), to further control the - // quantization filter. Set Adaptive Quantization to Off (OFF) to apply no - // quantization to your output. + // When you set Adaptive Quantization to Auto, or leave blank, MediaConvert + // automatically applies quantization to improve the video quality of your output. + // Set Adaptive Quantization to Low, Medium, High, Higher, or Max to manually + // control the strength of the quantization filter. When you do, you can specify a + // value for Spatial Adaptive Quantization, Temporal Adaptive Quantization, and + // Flicker Adaptive Quantization, to further control the quantization filter. Set + // Adaptive Quantization to Off to apply no quantization to your output. AdaptiveQuantization H265AdaptiveQuantization // Enables Alternate Transfer Function SEI message for outputs using Hybrid Log @@ -3676,20 +3494,15 @@ type H265Settings struct { // I-frame. When you enable this setting, the encoder updates these macroblocks // slightly more often to smooth out the flicker. This setting is disabled by // default. Related setting: In addition to enabling this setting, you must also - // set adaptiveQuantization to a value other than Off (OFF). + // set adaptiveQuantization to a value other than Off. 
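A minimal sketch, also not part of this diff, of the fractional QVBR example in the comment above: a quality level of 7.33 is split into a whole-number QvbrQualityLevel plus a QvbrQualityLevelFineTune, using the value-typed numeric fields shown in this file.

package main

import "github.com/aws/aws-sdk-go-v2/service/mediaconvert/types"

func main() {
	// QVBR quality level 7.33 = whole-number part 7 + fine-tune 0.33.
	qvbr := &types.H265QvbrSettings{
		QvbrQualityLevel:         7,
		QvbrQualityLevelFineTune: 0.33,
	}
	_ = qvbr // attach via H265Settings.QvbrSettings when Rate control mode is QVBR
}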
FlickerAdaptiveQuantization H265FlickerAdaptiveQuantization - // If you are using the console, use the Framerate setting to specify the frame - // rate for this output. If you want to keep the same frame rate as the input - // video, choose Follow source. If you want to do frame rate conversion, choose a - // frame rate from the dropdown list or choose Custom. The framerates shown in the - // dropdown list are decimal approximations of fractions. If you choose Custom, - // specify your frame rate as a fraction. If you are creating your transcoding job - // specification as a JSON file without the console, use FramerateControl to - // specify which value the service uses for the frame rate for this output. Choose - // INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the - // input. Choose SPECIFIED if you want the service to use the frame rate you - // specify in the settings FramerateNumerator and FramerateDenominator. + // Use the Framerate setting to specify the frame rate for this output. If you + // want to keep the same frame rate as the input video, choose Follow source. If + // you want to do frame rate conversion, choose a frame rate from the dropdown list + // or choose Custom. The framerates shown in the dropdown list are decimal + // approximations of fractions. If you choose Custom, specify your frame rate as a + // fraction. FramerateControl H265FramerateControl // Choose the method that you want MediaConvert to use when increasing or @@ -3732,33 +3545,29 @@ type H265Settings struct { // example, if you want to allow four open GOPs and then require a closed GOP, set // this value to 5. We recommend that you have the transcoder automatically choose // this value for you based on characteristics of your input video. To enable this - // automatic behavior, keep the default value by leaving this setting out of your - // JSON job specification. In the console, do this by keeping the default empty - // value. If you do explicitly specify a value, for segmented outputs, don't set - // this value to 0. + // automatic behavior, keep the default empty value. If you do + // explicitly specify a value, for segmented outputs, don't set this value to 0. GopClosedCadence int32 - // Use this setting only when you set GOP mode control (GopSizeUnits) to - // Specified, frames (FRAMES) or Specified, seconds (SECONDS). Specify the GOP - // length using a whole number of frames or a decimal value of seconds. - // MediaConvert will interpret this value as frames or seconds depending on the - // value you choose for GOP mode control (GopSizeUnits). If you want to allow - // MediaConvert to automatically determine GOP size, leave GOP size blank and set - // GOP mode control to Auto (AUTO). If your output group specifies HLS, DASH, or - // CMAF, leave GOP size blank and set GOP mode control to Auto in each output in - // your output group. + // Use this setting only when you set GOP mode control to Specified, frames or + // Specified, seconds. Specify the GOP length using a whole number of frames or a + // decimal value of seconds. MediaConvert will interpret this value as frames or + // seconds depending on the value you choose for GOP mode control. If you want to + // allow MediaConvert to automatically determine GOP size, leave GOP size blank and + // set GOP mode control to Auto. If your output group specifies HLS, DASH, or CMAF, + // leave GOP size blank and set GOP mode control to Auto in each output in your + // output group.
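The automatic GOP behavior that the two comments above recommend for HLS, DASH, and CMAF outputs reduces to very little Go; a sketch under the assumption that, with this file's value-typed fields, leaving GopSize at its zero value is how you leave GOP size "blank", and that the Auto constant follows the SDK's generated naming.

package main

import "github.com/aws/aws-sdk-go-v2/service/mediaconvert/types"

func main() {
	h265 := &types.H265Settings{
		// Let the transcoder choose the GOP structure: GOP mode control
		// Auto, with GopSize left unset ("blank").
		GopSizeUnits: types.H265GopSizeUnitsAuto,
	}
	_ = h265
}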
GopSize float64 // Specify how the transcoder determines GOP size for this output. We recommend // that you have the transcoder automatically choose this value for you based on // characteristics of your input video. To enable this automatic behavior, choose - // Auto (AUTO) and and leave GOP size (GopSize) blank. By default, if you don't - // specify GOP mode control (GopSizeUnits), MediaConvert will use automatic - // behavior. If your output group specifies HLS, DASH, or CMAF, set GOP mode - // control to Auto and leave GOP size blank in each output in your output group. To - // explicitly specify the GOP length, choose Specified, frames (FRAMES) or - // Specified, seconds (SECONDS) and then provide the GOP length in the related - // setting GOP size (GopSize). + // Auto and leave GOP size blank. By default, if you don't specify GOP mode + // control, MediaConvert will use automatic behavior. If your output group + // specifies HLS, DASH, or CMAF, set GOP mode control to Auto and leave GOP size + // blank in each output in your output group. To explicitly specify the GOP length, + // choose Specified, frames or Specified, seconds and then provide the GOP length + // in the related setting GOP size. GopSizeUnits H265GopSizeUnits // If your downstream systems have strict buffer requirements: Specify the minimum @@ -3775,39 +3584,35 @@ type H265Settings struct { HrdBufferSize int32 // Choose the scan line type for the output. Keep the default value, Progressive - // (PROGRESSIVE) to create a progressive output, regardless of the scan type of - // your input. Use Top field first (TOP_FIELD) or Bottom field first (BOTTOM_FIELD) - // to create an output that's interlaced with the same field polarity throughout. - // Use Follow, default top (FOLLOW_TOP_FIELD) or Follow, default bottom - // (FOLLOW_BOTTOM_FIELD) to produce outputs with the same field polarity as the - // source. For jobs that have multiple inputs, the output field polarity might - // change over the course of the output. Follow behavior depends on the input scan - // type. If the source is interlaced, the output will be interlaced with the same - // polarity as the source. If the source is progressive, the output will be - // interlaced with top field bottom field first, depending on which of the Follow - // options you choose. + // to create a progressive output, regardless of the scan type of your input. Use + // Top field first or Bottom field first to create an output that's interlaced with + // the same field polarity throughout. Use Follow, default top or Follow, default + // bottom to produce outputs with the same field polarity as the source. For jobs + // that have multiple inputs, the output field polarity might change over the + // course of the output. Follow behavior depends on the input scan type. If the + // source is interlaced, the output will be interlaced with the same polarity as + // the source. If the source is progressive, the output will be interlaced with top + // field or bottom field first, depending on which of the Follow options you choose. InterlaceMode H265InterlaceMode // Maximum bitrate in bits/second. For example, enter five megabits per second as // 5000000. Required when Rate control mode is QVBR. MaxBitrate int32 - // Use this setting only when you also enable Scene change detection - // (SceneChangeDetect).
This setting determines how the encoder manages the spacing - // between I-frames that it inserts as part of the I-frame cadence and the I-frames - // that it inserts for Scene change detection. We recommend that you have the - // transcoder automatically choose this value for you based on characteristics of - // your input video. To enable this automatic behavior, keep the default value by - // leaving this setting out of your JSON job specification. In the console, do this - // by keeping the default empty value. When you explicitly specify a value for this - // setting, the encoder determines whether to skip a cadence-driven I-frame by the - // value you set. For example, if you set Min I interval (minIInterval) to 5 and a - // cadence-driven I-frame would fall within 5 frames of a scene-change I-frame, - // then the encoder skips the cadence-driven I-frame. In this way, one GOP is - // shrunk slightly and one GOP is stretched slightly. When the cadence-driven - // I-frames are farther from the scene-change I-frame than the value you set, then - // the encoder leaves all I-frames in place and the GOPs surrounding the scene - // change are smaller than the usual cadence GOPs. + // Use this setting only when you also enable Scene change detection. This setting + // determines how the encoder manages the spacing between I-frames that it inserts + // as part of the I-frame cadence and the I-frames that it inserts for Scene change + // detection. We recommend that you have the transcoder automatically choose this + // value for you based on characteristics of your input video. To enable this + // automatic behavior, keep the default empty value. When you + // explicitly specify a value for this setting, the encoder determines whether to + // skip a cadence-driven I-frame by the value you set. For example, if you set Min + // I interval to 5 and a cadence-driven I-frame would fall within 5 frames of a + // scene-change I-frame, then the encoder skips the cadence-driven I-frame. In this + // way, one GOP is shrunk slightly and one GOP is stretched slightly. When the + // cadence-driven I-frames are farther from the scene-change I-frame than the value + // you set, then the encoder leaves all I-frames in place and the GOPs surrounding + // the scene change are smaller than the usual cadence GOPs. MinIInterval int32 // Specify the number of B-frames between reference frames in this output. For the @@ -3822,38 +3627,33 @@ type H265Settings struct { NumberReferenceFrames int32 // Optional. Specify how the service determines the pixel aspect ratio (PAR) for - // this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE), uses - // the PAR from your input video for your output. To specify a different PAR in the - // console, choose any value other than Follow source. To specify a different PAR - // by editing the JSON job specification, choose SPECIFIED. When you choose - // SPECIFIED for this setting, you must also specify values for the parNumerator - // and parDenominator settings. + // this output. The default behavior, Follow source, uses the PAR from your input + // video for your output. To specify a different PAR, choose any value other than + // Follow source. When you choose SPECIFIED for this setting, you must also specify + // values for the parNumerator and parDenominator settings. ParControl H265ParControl - // Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the - // console, this corresponds to any value other than Follow source.
When you - // specify an output pixel aspect ratio (PAR) that is different from your input - // video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC - // widescreen, you would specify the ratio 40:33. In this example, the value for - // parDenominator is 33. + // Required when you set Pixel aspect ratio to SPECIFIED. On the console, this + // corresponds to any value other than Follow source. When you specify an output + // pixel aspect ratio (PAR) that is different from your input video PAR, provide + // your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would + // specify the ratio 40:33. In this example, the value for parDenominator is 33. ParDenominator int32 - // Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the - // console, this corresponds to any value other than Follow source. When you - // specify an output pixel aspect ratio (PAR) that is different from your input - // video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC - // widescreen, you would specify the ratio 40:33. In this example, the value for - // parNumerator is 40. + // Required when you set Pixel aspect ratio to SPECIFIED. On the console, this + // corresponds to any value other than Follow source. When you specify an output + // pixel aspect ratio (PAR) that is different from your input video PAR, provide + // your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would + // specify the ratio 40:33. In this example, the value for parNumerator is 40. ParNumerator int32 - // Optional. Use Quality tuning level (qualityTuningLevel) to choose how you want - // to trade off encoding speed for output video quality. The default behavior is - // faster, lower quality, single-pass encoding. + // Optional. Use Quality tuning level to choose how you want to trade off encoding + // speed for output video quality. The default behavior is faster, lower quality, + // single-pass encoding. QualityTuningLevel H265QualityTuningLevel // Settings for quality-defined variable bitrate encoding with the H.265 codec. - // Use these settings only when you set QVBR for Rate control mode - // (RateControlMode). + // Use these settings only when you set QVBR for Rate control mode. QvbrSettings *H265QvbrSettings // Use this setting to specify whether this output has a variable bitrate (VBR), @@ -3865,24 +3665,23 @@ type H265Settings struct { SampleAdaptiveOffsetFilterMode H265SampleAdaptiveOffsetFilterMode // Use this setting for interlaced outputs, when your output frame rate is half of - // your input frame rate. In this situation, choose Optimized interlacing - // (INTERLACED_OPTIMIZE) to create a better quality interlaced output. In this - // case, each progressive frame from the input corresponds to an interlaced field - // in the output. Keep the default value, Basic interlacing (INTERLACED), for all - // other output frame rates. With basic interlacing, MediaConvert performs any - // frame rate conversion first and then interlaces the frames. When you choose - // Optimized interlacing and you set your output frame rate to a value that isn't - // suitable for optimized interlacing, MediaConvert automatically falls back to - // basic interlacing. Required settings: To use optimized interlacing, you must set - // Telecine (telecine) to None (NONE) or Soft (SOFT). You can't use optimized - // interlacing for hard telecine outputs. You must also set Interlace mode - // (interlaceMode) to a value other than Progressive (PROGRESSIVE). 
+ // your input frame rate. In this situation, choose Optimized interlacing to create + // a better quality interlaced output. In this case, each progressive frame from + // the input corresponds to an interlaced field in the output. Keep the default + // value, Basic interlacing, for all other output frame rates. With basic + // interlacing, MediaConvert performs any frame rate conversion first and then + // interlaces the frames. When you choose Optimized interlacing and you set your + // output frame rate to a value that isn't suitable for optimized interlacing, + // MediaConvert automatically falls back to basic interlacing. Required settings: + // To use optimized interlacing, you must set Telecine to None or Soft. You can't + // use optimized interlacing for hard telecine outputs. You must also set Interlace + // mode to a value other than Progressive. ScanTypeConversionMode H265ScanTypeConversionMode // Enable this setting to insert I-frames at scene changes that the service // automatically detects. This improves video quality and is enabled by default. If - // this output uses QVBR, choose Transition detection (TRANSITION_DETECTION) for - // further video quality improvement. For more information about QVBR, see + // this output uses QVBR, choose Transition detection for further video quality + // improvement. For more information about QVBR, see // https://docs.aws.amazon.com/console/mediaconvert/cbr-vbr-qvbr. SceneChangeDetect H265SceneChangeDetect @@ -3896,48 +3695,45 @@ type H265Settings struct { // PAL, MediaConvert relabels the video frames to 25 fps and resamples your audio // to keep it synchronized with the video. Note that enabling this setting will // slightly reduce the duration of your video. Required settings: You must also set - // Framerate to 25. In your JSON job specification, set (framerateControl) to - // (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to 1. + // Framerate to 25. SlowPal H265SlowPal - // Keep the default value, Enabled (ENABLED), to adjust quantization within each - // frame based on spatial variation of content complexity. When you enable this - // feature, the encoder uses fewer bits on areas that can sustain more distortion - // with no noticeable visual degradation and uses more bits on areas where any - // small distortion will be noticeable. For example, complex textured blocks are - // encoded with fewer bits and smooth textured blocks are encoded with more bits. - // Enabling this feature will almost always improve your video quality. Note, - // though, that this feature doesn't take into account where the viewer's attention - // is likely to be. If viewers are likely to be focusing their attention on a part - // of the screen with a lot of complex texture, you might choose to disable this - // feature. Related setting: When you enable spatial adaptive quantization, set the - // value for Adaptive quantization (adaptiveQuantization) depending on your - // content. For homogeneous content, such as cartoons and video games, set it to - // Low. For content with a wider variety of textures, set it to High or Higher. + // Keep the default value, Enabled, to adjust quantization within each frame based + // on spatial variation of content complexity. When you enable this feature, the + // encoder uses fewer bits on areas that can sustain more distortion with no + // noticeable visual degradation and uses more bits on areas where any small + // distortion will be noticeable. 
For example, complex textured blocks are encoded + // with fewer bits and smooth textured blocks are encoded with more bits. Enabling + // this feature will almost always improve your video quality. Note, though, that + // this feature doesn't take into account where the viewer's attention is likely to + // be. If viewers are likely to be focusing their attention on a part of the screen + // with a lot of complex texture, you might choose to disable this feature. Related + // setting: When you enable spatial adaptive quantization, set the value for + // Adaptive quantization depending on your content. For homogeneous content, such + // as cartoons and video games, set it to Low. For content with a wider variety of + // textures, set it to High or Higher. SpatialAdaptiveQuantization H265SpatialAdaptiveQuantization - // This field applies only if the Streams > Advanced > Framerate (framerate) field - // is set to 29.970. This field works with the Streams > Advanced > Preprocessors > - // Deinterlacer field (deinterlace_mode) and the Streams > Advanced > Interlaced - // Mode field (interlace_mode) to identify the scan type for the output: - // Progressive, Interlaced, Hard Telecine or Soft Telecine. - Hard: produces 29.97i - // output from 23.976 input. - Soft: produces 23.976; the player converts this - // output to 29.97i. + // This field applies only if the Streams > Advanced > Framerate field is set to + // 29.970. This field works with the Streams > Advanced > Preprocessors > + // Deinterlacer field and the Streams > Advanced > Interlaced Mode field to + // identify the scan type for the output: Progressive, Interlaced, Hard Telecine or + // Soft Telecine. - Hard: produces 29.97i output from 23.976 input. - Soft: + // produces 23.976; the player converts this output to 29.97i. Telecine H265Telecine - // Keep the default value, Enabled (ENABLED), to adjust quantization within each - // frame based on temporal variation of content complexity. When you enable this - // feature, the encoder uses fewer bits on areas of the frame that aren't moving - // and uses more bits on complex objects with sharp edges that move a lot. For - // example, this feature improves the readability of text tickers on newscasts and - // scoreboards on sports matches. Enabling this feature will almost always improve - // your video quality. Note, though, that this feature doesn't take into account - // where the viewer's attention is likely to be. If viewers are likely to be - // focusing their attention on a part of the screen that doesn't have moving - // objects with sharp edges, such as sports athletes' faces, you might choose to - // disable this feature. Related setting: When you enable temporal quantization, - // adjust the strength of the filter with the setting Adaptive quantization - // (adaptiveQuantization). + // Keep the default value, Enabled, to adjust quantization within each frame based + // on temporal variation of content complexity. When you enable this feature, the + // encoder uses fewer bits on areas of the frame that aren't moving and uses more + // bits on complex objects with sharp edges that move a lot. For example, this + // feature improves the readability of text tickers on newscasts and scoreboards on + // sports matches. Enabling this feature will almost always improve your video + // quality. Note, though, that this feature doesn't take into account where the + // viewer's attention is likely to be. 
If viewers are likely to be focusing their + // attention on a part of the screen that doesn't have moving objects with sharp + // edges, such as sports athletes' faces, you might choose to disable this feature. + // Related setting: When you enable temporal quantization, adjust the strength of + // the filter with the setting Adaptive quantization. TemporalAdaptiveQuantization H265TemporalAdaptiveQuantization // Enables temporal layer identifiers in the encoded bitstream. Up to 3 layers are @@ -4134,10 +3930,7 @@ type HlsEncryptionSettings struct { } // Settings related to your HLS output package. For more information, see -// https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html. When -// you work directly in your JSON job specification, include this object and any -// required children when you set Type, under OutputGroupSettings, to -// HLS_GROUP_SETTINGS. +// https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html. type HlsGroupSettings struct { // Choose one or more ad marker types to decorate your Apple HLS manifest. This @@ -4152,9 +3945,9 @@ type HlsGroupSettings struct { AdditionalManifests []HlsAdditionalManifest // Ignore this setting unless you are using FairPlay DRM with Verimatrix and you - // encounter playback issues. Keep the default value, Include (INCLUDE), to output - // audio-only headers. Choose Exclude (EXCLUDE) to remove the audio-only headers - // from your audio segments. + // encounter playback issues. Keep the default value, Include, to output audio-only + // headers. Choose Exclude to remove the audio-only headers from your audio + // segments. AudioOnlyHeader HlsAudioOnlyHeader // A partial URI prefix that will be prepended to each output in the media .m3u8 @@ -4176,28 +3969,26 @@ type HlsGroupSettings struct { // from the manifest. CaptionLanguageSetting HlsCaptionLanguageSetting - // Set Caption segment length control (CaptionSegmentLengthControl) to Match video - // (MATCH_VIDEO) to create caption segments that align with the video segments from - // the first video output in this output group. For example, if the video segments - // are 2 seconds long, your WebVTT segments will also be 2 seconds long. Keep the - // default setting, Large segments (LARGE_SEGMENTS) to create caption segments that - // are 300 seconds long. + // Set Caption segment length control to Match video to create caption segments + // that align with the video segments from the first video output in this output + // group. For example, if the video segments are 2 seconds long, your WebVTT + // segments will also be 2 seconds long. Keep the default setting, Large segments, + // to create caption segments that are 300 seconds long. CaptionSegmentLengthControl HlsCaptionSegmentLengthControl // Disable this setting only when your workflow requires the #EXT-X-ALLOW-CACHE:no - // tag. Otherwise, keep the default value Enabled (ENABLED) and control caching in - // your video distribution set up. For example, use the Cache-Control http header. + // tag. Otherwise, keep the default value Enabled and control caching in your video + // distribution setup. For example, use the Cache-Control http header. ClientCache HlsClientCache // Specification to use (RFC-6381 or the default RFC-4281) during m3u8 playlist // generation. CodecSpecification HlsCodecSpecification - // Use Destination (Destination) to specify the S3 output location and the output - // filename base. Destination accepts format identifiers.
If you do not specify the - // base filename in the URI, the service will use the filename of the input file. - // If your job has multiple inputs, the service uses the filename of the first - // input file. + // Use Destination to specify the S3 output location and the output filename base. + // Destination accepts format identifiers. If you do not specify the base filename + // in the URI, the service will use the filename of the input file. If your job has + // multiple inputs, the service uses the filename of the first input file. Destination *string // Settings associated with the destination. Will vary based on the type of @@ -4211,14 +4002,13 @@ type HlsGroupSettings struct { Encryption *HlsEncryptionSettings // Specify whether MediaConvert generates images for trick play. Keep the default - // value, None (NONE), to not generate any images. Choose Thumbnail (THUMBNAIL) to - // generate tiled thumbnails. Choose Thumbnail and full frame - // (THUMBNAIL_AND_FULLFRAME) to generate tiled thumbnails and full-resolution - // images of single frames. MediaConvert creates a child manifest for each set of - // images that you generate and adds corresponding entries to the parent manifest. - // A common application for these images is Roku trick mode. The thumbnails and - // full-frame images that MediaConvert creates with this feature are compatible - // with this Roku specification: + // value, None, to not generate any images. Choose Thumbnail to generate tiled + // thumbnails. Choose Thumbnail and full frame to generate tiled thumbnails and + // full-resolution images of single frames. MediaConvert creates a child manifest + // for each set of images that you generate and adds corresponding entries to the + // parent manifest. A common application for these images is Roku trick mode. The + // thumbnails and full-frame images that MediaConvert creates with this feature are + // compatible with this Roku specification: // https://developer.roku.com/docs/developer-program/media-playback/trick-mode/hls-and-dash.md ImageBasedTrickPlay HlsImageBasedTrickPlay @@ -4280,17 +4070,15 @@ type HlsGroupSettings struct { // Specify the length, in whole seconds, of each segment. When you don't specify a // value, MediaConvert defaults to 10. Related settings: Use Segment length control - // (SegmentLengthControl) to specify whether the encoder enforces this value - // strictly. Use Segment control (HlsSegmentControl) to specify whether - // MediaConvert creates separate segment files or one content file that has - // metadata to mark the segment boundaries. + // to specify whether the encoder enforces this value strictly. Use Segment control + // to specify whether MediaConvert creates separate segment files or one content + // file that has metadata to mark the segment boundaries. SegmentLength int32 // Specify how you want MediaConvert to determine the segment length. Choose Exact - // (EXACT) to have the encoder use the exact length that you specify with the - // setting Segment length (SegmentLength). This might result in extra I-frames. - // Choose Multiple of GOP (GOP_MULTIPLE) to have the encoder round up the segment - // lengths to match the next GOP boundary. + // to have the encoder use the exact length that you specify with the setting + // Segment length. This might result in extra I-frames. Choose Multiple of GOP to + // have the encoder round up the segment lengths to match the next GOP boundary. 
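An illustrative sketch, not part of this diff, of the segment-length relationship described above: a 10-second target with Segment length control set to Multiple of GOP, so the encoder rounds each segment up to the next GOP boundary instead of inserting extra I-frames. The constant name assumes the SDK's usual generated naming.

package main

import "github.com/aws/aws-sdk-go-v2/service/mediaconvert/types"

func main() {
	hls := &types.HlsGroupSettings{
		// Target segment length in whole seconds (10 is also the default).
		SegmentLength: 10,
		// Round up to GOP boundaries rather than enforcing the length exactly.
		SegmentLengthControl: types.HlsSegmentLengthControlGopMultiple,
	}
	_ = hls
}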
SegmentLengthControl HlsSegmentLengthControl // Specify the number of segments to write to a subdirectory before starting a new @@ -4313,19 +4101,16 @@ type HlsGroupSettings struct { // duration. TargetDurationCompatibilityMode HlsTargetDurationCompatibilityMode - // Specify the type of the ID3 frame (timedMetadataId3Frame) to use for ID3 - // timestamps (timedMetadataId3Period) in your output. To include ID3 timestamps: - // Specify PRIV (PRIV) or TDRL (TDRL) and set ID3 metadata (timedMetadata) to - // Passthrough (PASSTHROUGH). To exclude ID3 timestamps: Set ID3 timestamp frame - // type to None (NONE). + // Specify the type of the ID3 frame to use for ID3 timestamps in your output. To + // include ID3 timestamps: Specify PRIV or TDRL and set ID3 metadata to + // Passthrough. To exclude ID3 timestamps: Set ID3 timestamp frame type to None. TimedMetadataId3Frame HlsTimedMetadataId3Frame // Specify the interval in seconds to write ID3 timestamps in your output. The // first timestamp starts at the output timecode and date, and increases // incrementally with each ID3 timestamp. To use the default interval of 10 // seconds: Leave blank. To include this metadata in your output: Set ID3 timestamp - // frame type (timedMetadataId3Frame) to PRIV (PRIV) or TDRL (TDRL), and set ID3 - // metadata (timedMetadata) to Passthrough (PASSTHROUGH). + // frame type to PRIV or TDRL, and set ID3 metadata to Passthrough. TimedMetadataId3Period int32 // Provides an extra millisecond delta offset to fine tune the timestamps. @@ -4400,9 +4185,9 @@ type HlsSettings struct { // Use this setting only in audio-only outputs. Choose MPEG-2 Transport Stream // (M2TS) to create a file in an MPEG2-TS container. Keep the default value - // Automatic (AUTOMATIC) to create an audio-only file in a raw container. - // Regardless of the value that you specify here, if this output has video, the - // service will place the output into an MPEG2-TS container. + // Automatic to create an audio-only file in a raw container. Regardless of the + // value that you specify here, if this output has video, the service will place + // the output into an MPEG2-TS container. AudioOnlyContainer HlsAudioOnlyContainer // List all the audio groups that are used with the video output stream. Input all @@ -4423,21 +4208,19 @@ type HlsSettings struct { AudioTrackType HlsAudioTrackType // Specify whether to flag this audio track as descriptive video service (DVS) in - // your HLS parent manifest. When you choose Flag (FLAG), MediaConvert includes the + // your HLS parent manifest. When you choose Flag, MediaConvert includes the // parameter CHARACTERISTICS="public.accessibility.describes-video" in the - // EXT-X-MEDIA entry for this track. When you keep the default choice, Don't flag - // (DONT_FLAG), MediaConvert leaves this parameter out. The DVS flag can help with - // accessibility on Apple devices. For more information, see the Apple - // documentation. + // EXT-X-MEDIA entry for this track. When you keep the default choice, Don't flag, + // MediaConvert leaves this parameter out. The DVS flag can help with accessibility + // on Apple devices. For more information, see the Apple documentation. DescriptiveVideoServiceFlag HlsDescriptiveVideoServiceFlag - // Choose Include (INCLUDE) to have MediaConvert generate a child manifest that - // lists only the I-frames for this rendition, in addition to your regular manifest - // for this rendition. You might use this manifest as part of a workflow that - // creates preview functions for your video. 
MediaConvert adds both the I-frame - // only child manifest and the regular child manifest to the parent manifest. When - // you don't need the I-frame only child manifest, keep the default value Exclude - // (EXCLUDE). + // Choose Include to have MediaConvert generate a child manifest that lists only + // the I-frames for this rendition, in addition to your regular manifest for this + // rendition. You might use this manifest as part of a workflow that creates + // preview functions for your video. MediaConvert adds both the I-frame only child + // manifest and the regular child manifest to the parent manifest. When you don't + // need the I-frame only child manifest, keep the default value Exclude. IFrameOnlyManifest HlsIFrameOnlyManifest // Use this setting to add an identifying string to the filename of each segment. @@ -4473,16 +4256,16 @@ type HopDestination struct { noSmithyDocumentSerde } -// To insert ID3 tags in your output, specify two values. Use ID3 tag (Id3) to -// specify the base 64 encoded string and use Timecode (TimeCode) to specify the -// time when the tag should be inserted. To insert multiple ID3 tags in your -// output, create multiple instances of ID3 insertion (Id3Insertion). +// To insert ID3 tags in your output, specify two values. Use ID3 tag to specify +// the base64-encoded string and use Timecode to specify the time when the tag +// should be inserted. To insert multiple ID3 tags in your output, create multiple +// instances of ID3 insertion. type Id3Insertion struct { - // Use ID3 tag (Id3) to provide a fully formed ID3 tag in base64-encode format. + // Use ID3 tag to provide a fully formed ID3 tag in base64-encoded format. Id3 *string - // Provide a Timecode (TimeCode) in HH:MM:SS:FF or HH:MM:SS;FF format. + // Provide a Timecode in HH:MM:SS:FF or HH:MM:SS;FF format. Timecode *string noSmithyDocumentSerde @@ -4514,8 +4297,8 @@ type ImageInserter struct { // the same output group, but different output from your video. For more // information, see // https://docs.aws.amazon.com/mediaconvert/latest/ug/ttml-and-webvtt-output-captions.html. -// When you work directly in your JSON job specification, include this object and -// any required children when you set destinationType to IMSC. type ImscDestinationSettings struct { // If the IMSC captions track is intended to provide accessibility for people who @@ -4566,28 +4347,26 @@ type Input struct { AdvancedInputFilterSettings *AdvancedInputFilterSettings // Use audio selector groups to combine multiple sidecar audio inputs so that you - // can assign them to a single output audio tab (AudioDescription). Note that, if - // you're working with embedded audio, it's simpler to assign multiple input tracks - // into a single audio selector rather than use an audio selector group. + // can assign them to a single output audio tab. Note that, if you're working with + // embedded audio, it's simpler to assign multiple input tracks into a single audio + // selector rather than use an audio selector group. AudioSelectorGroups map[string]AudioSelectorGroup - // Use Audio selectors (AudioSelectors) to specify a track or set of tracks from - // the input that you will use in your outputs. You can use multiple Audio - // selectors per input.
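The two-value pairing that the Id3Insertion comments above describe, a base64-encoded tag plus the timecode at which to insert it, looks like this in practice. A hypothetical sketch: the Id3 string below is a placeholder, not a real fully formed ID3 tag.

package main

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/mediaconvert/types"
)

func main() {
	tag := types.Id3Insertion{
		// Placeholder; supply a real base64-encoded, fully formed ID3 tag.
		Id3: aws.String("SUQzBAAAAAAA..."),
		// Insert the tag one minute in, HH:MM:SS:FF format.
		Timecode: aws.String("00:01:00:00"),
	}
	_ = tag // goes in TimedMetadataInsertion.Id3Insertions
}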
AudioSelectors map[string]AudioSelector // Use captions selectors to specify the captions data from your input that you // use in your outputs. You can use up to 100 captions selectors per input. CaptionSelectors map[string]CaptionSelector - // Use Cropping selection (crop) to specify the video area that the service will - // include in the output video frame. If you specify a value here, it will override - // any value that you specify in the output setting Cropping selection (crop). + // Use Cropping selection to specify the video area that the service will include + // in the output video frame. If you specify a value here, it will override any + // value that you specify in the output setting Cropping selection. Crop *Rectangle - // Enable Deblock (InputDeblockFilter) to produce smoother motion in the output. - // Default is disabled. Only manually controllable for MPEG2 and uncompressed video - // inputs. + // Enable Deblock to produce smoother motion in the output. Default is disabled. + // Only manually controllable for MPEG2 and uncompressed video inputs. DeblockFilter InputDeblockFilter // Settings for decrypting any input files that you encrypt before you upload them @@ -4596,8 +4375,8 @@ type Input struct { // content. DecryptionSettings *InputDecryptionSettings - // Enable Denoise (InputDenoiseFilter) to filter noise from the input. Default is - // disabled. Only applicable to MPEG2, H.264, H.265, and uncompressed video inputs. + // Enable Denoise to filter noise from the input. Default is disabled. Only + // applicable to MPEG2, H.264, H.265, and uncompressed video inputs. DenoiseFilter InputDenoiseFilter // Use this setting only when your video source has Dolby Vision studio mastering @@ -4616,8 +4395,8 @@ type Input struct { // specify them in the job, to create the outputs. If your input format is IMF, // specify your input by providing the path to your CPL. For example, // "s3://bucket/vf/cpl.xml". If the CPL is in an incomplete IMP, make sure to use - // Supplemental IMPs (SupplementalImps) to specify any supplemental IMPs that - // contain assets referenced by the CPL. + // Supplemental IMPs to specify any supplemental IMPs that contain assets + // referenced by the CPL. FileInput *string // Specify whether to apply input filtering to improve the video quality of your @@ -4640,42 +4419,42 @@ type Input struct { // disabled by default. ImageInserter *ImageInserter - // (InputClippings) contains sets of start and end times that together specify a - // portion of the input to be used in the outputs. If you provide only a start - // time, the clip will be the entire input from that point to the end. If you - // provide only an end time, it will be the entire input up to that point. When you - // specify more than one input clip, the transcoding service creates the job - // outputs by stringing the clips together in the order you specify them. + // Contains sets of start and end times that together specify a portion of the + // input to be used in the outputs. If you provide only a start time, the clip will + // be the entire input from that point to the end. If you provide only an end time, + // it will be the entire input up to that point. When you specify more than one + // input clip, the transcoding service creates the job outputs by stringing the + // clips together in the order you specify them. InputClippings []InputClipping // When you have a progressive segmented frame (PsF) input, use this setting to // flag the input as PsF. 
MediaConvert doesn't automatically detect PsF. Therefore, // flagging your input as PsF results in better preservation of video quality when // you do deinterlacing and frame rate conversion. If you don't specify, the - // default value is Auto (AUTO). Auto is the correct setting for all inputs that - // are not PsF. Don't set this value to PsF when your input is interlaced. Doing so - // creates horizontal interlacing artifacts. + // default value is Auto. Auto is the correct setting for all inputs that are not + // PsF. Don't set this value to PsF when your input is interlaced. Doing so creates + // horizontal interlacing artifacts. InputScanType InputScanType - // Use Selection placement (position) to define the video area in your output - // frame. The area outside of the rectangle that you specify here is black. If you - // specify a value here, it will override any value that you specify in the output - // setting Selection placement (position). If you specify a value here, this will - // override any AFD values in your input, even if you set Respond to AFD - // (RespondToAfd) to Respond (RESPOND). If you specify a value here, this will - // ignore anything that you specify for the setting Scaling Behavior - // (scalingBehavior). + // Use Selection placement to define the video area in your output frame. The area + // outside of the rectangle that you specify here is black. If you specify a value + // here, it will override any value that you specify in the output setting + // Selection placement. If you specify a value here, this will override any AFD + // values in your input, even if you set Respond to AFD to Respond. If you specify + // a value here, this will ignore anything that you specify for the setting Scaling + // Behavior. Position *Rectangle - // Use Program (programNumber) to select a specific program from within a - // multi-program transport stream. Note that Quad 4K is not currently supported. - // Default is the first program within the transport stream. If the program you - // specify doesn't exist, the transcoding service will use this default. + // Use Program to select a specific program from within a multi-program transport + // stream. Note that Quad 4K is not currently supported. Default is the first + // program within the transport stream. If the program you specify doesn't exist, + // the transcoding service will use this default. ProgramNumber int32 - // Set PSI control (InputPsiControl) for transport stream inputs to specify which - // data the demux process to scans. * Ignore PSI - Scan all PIDs for audio and - // video. * Use PSI - Scan only PSI data. + // Set PSI control for transport stream inputs to specify which data the demux + // process scans. + // - Ignore PSI - Scan all PIDs for audio and video. + // - Use PSI - Scan only PSI data. PsiControl InputPsiControl // Provide a list of any necessary supplemental IMPs. You need supplemental IMPs @@ -4686,24 +4465,21 @@ type Input struct { // your input CPL, because the service automatically detects it. SupplementalImps []string - // Use this Timecode source setting, located under the input settings - // (InputTimecodeSource), to specify how the service counts input video frames. - // This input frame count affects only the behavior of features that apply to a - // single input at a time, such as input clipping and synchronizing some captions - // formats. Choose Embedded (EMBEDDED) to use the timecodes in your input video. - // Choose Start at zero (ZEROBASED) to start the first frame at zero.
Choose - // Specified start (SPECIFIEDSTART) to start the first frame at the timecode that - // you specify in the setting Start timecode (timecodeStart). If you don't specify - // a value for Timecode source, the service will use Embedded by default. For more - // information about timecodes, see - // https://docs.aws.amazon.com/console/mediaconvert/timecode. + // Use this Timecode source setting, located under the input settings, to specify + // how the service counts input video frames. This input frame count affects only + // the behavior of features that apply to a single input at a time, such as input + // clipping and synchronizing some captions formats. Choose Embedded to use the + // timecodes in your input video. Choose Start at zero to start the first frame at + // zero. Choose Specified start to start the first frame at the timecode that you + // specify in the setting Start timecode. If you don't specify a value for Timecode + // source, the service will use Embedded by default. For more information about + // timecodes, see https://docs.aws.amazon.com/console/mediaconvert/timecode. TimecodeSource InputTimecodeSource // Specify the timecode that you want the service to use for this input's initial // frame. To use this setting, you must set the Timecode source setting, located - // under the input settings (InputTimecodeSource), to Specified start - // (SPECIFIEDSTART). For more information about timecodes, see - // https://docs.aws.amazon.com/console/mediaconvert/timecode. + // under the input settings, to Specified start. For more information about + // timecodes, see https://docs.aws.amazon.com/console/mediaconvert/timecode. TimecodeStart *string // When you include Video generator, MediaConvert creates a video input with black @@ -4727,25 +4503,24 @@ type Input struct { // https://docs.aws.amazon.com/mediaconvert/latest/ug/assembling-multiple-inputs-and-input-clips.html. type InputClipping struct { - // Set End timecode (EndTimecode) to the end of the portion of the input you are - // clipping. The frame corresponding to the End timecode value is included in the + // Set End timecode to the end of the portion of the input you are clipping. The + // frame corresponding to the End timecode value is included in the clip. Start + // timecode or End timecode may be left blank, but not both. Use the format + // HH:MM:SS:FF or HH:MM:SS;FF, where HH is the hour, MM is the minute, SS is the + // second, and FF is the frame number. When choosing this value, take into account + // your setting for timecode source under input settings. For example, if you have + // embedded timecodes that start at 01:00:00:00 and you want your clip to end six + // minutes into the video, use 01:06:00:00. + EndTimecode *string + + // Set Start timecode to the beginning of the portion of the input you are + // clipping. The frame corresponding to the Start timecode value is included in the // clip. Start timecode or End timecode may be left blank, but not both. Use the // format HH:MM:SS:FF or HH:MM:SS;FF, where HH is the hour, MM is the minute, SS is // the second, and FF is the frame number. When choosing this value, take into - // account your setting for timecode source under input settings - // (InputTimecodeSource). For example, if you have embedded timecodes that start at - // 01:00:00:00 and you want your clip to end six minutes into the video, use - // 01:06:00:00. - EndTimecode *string - - // Set Start timecode (StartTimecode) to the beginning of the portion of the input - // you are clipping. 
The frame corresponding to the Start timecode value is - // included in the clip. Start timecode or End timecode may be left blank, but not - // both. Use the format HH:MM:SS:FF or HH:MM:SS;FF, where HH is the hour, MM is the - // minute, SS is the second, and FF is the frame number. When choosing this value, - // take into account your setting for Input timecode source. For example, if you - // have embedded timecodes that start at 01:00:00:00 and you want your clip to - // begin five minutes into the video, use 01:05:00:00. + // account your setting for Input timecode source. For example, if you have + // embedded timecodes that start at 01:00:00:00 and you want your clip to begin + // five minutes into the video, use 01:05:00:00. StartTimecode *string noSmithyDocumentSerde @@ -4803,32 +4578,30 @@ type InputTemplate struct { AdvancedInputFilterSettings *AdvancedInputFilterSettings // Use audio selector groups to combine multiple sidecar audio inputs so that you - // can assign them to a single output audio tab (AudioDescription). Note that, if - // you're working with embedded audio, it's simpler to assign multiple input tracks - // into a single audio selector rather than use an audio selector group. + // can assign them to a single output audio tab. Note that, if you're working with + // embedded audio, it's simpler to assign multiple input tracks into a single audio + // selector rather than use an audio selector group. AudioSelectorGroups map[string]AudioSelectorGroup - // Use Audio selectors (AudioSelectors) to specify a track or set of tracks from - // the input that you will use in your outputs. You can use multiple Audio - // selectors per input. + // Use Audio selectors to specify a track or set of tracks from the input that you + // will use in your outputs. You can use multiple Audio selectors per input. AudioSelectors map[string]AudioSelector // Use captions selectors to specify the captions data from your input that you // use in your outputs. You can use up to 100 captions selectors per input. CaptionSelectors map[string]CaptionSelector - // Use Cropping selection (crop) to specify the video area that the service will - // include in the output video frame. If you specify a value here, it will override - // any value that you specify in the output setting Cropping selection (crop). + // Use Cropping selection to specify the video area that the service will include + // in the output video frame. If you specify a value here, it will override any + // value that you specify in the output setting Cropping selection. Crop *Rectangle - // Enable Deblock (InputDeblockFilter) to produce smoother motion in the output. - // Default is disabled. Only manually controllable for MPEG2 and uncompressed video - // inputs. + // Enable Deblock to produce smoother motion in the output. Default is disabled. + // Only manually controllable for MPEG2 and uncompressed video inputs. DeblockFilter InputDeblockFilter - // Enable Denoise (InputDenoiseFilter) to filter noise from the input. Default is - // disabled. Only applicable to MPEG2, H.264, H.265, and uncompressed video inputs. + // Enable Denoise to filter noise from the input. Default is disabled. Only + // applicable to MPEG2, H.264, H.265, and uncompressed video inputs. DenoiseFilter InputDenoiseFilter // Use this setting only when your video source has Dolby Vision studio mastering @@ -4862,62 +4635,59 @@ type InputTemplate struct { // disabled by default. 
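The worked example in the InputClipping comments above, embedded timecodes starting at 01:00:00:00 with a clip running from five to six minutes in, translates to roughly the following; an illustrative sketch, not part of this diff, assuming the Embedded constant follows the SDK's generated naming.

package main

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/mediaconvert/types"
)

func main() {
	in := types.Input{
		// Count frames using the timecodes embedded in the source.
		TimecodeSource: types.InputTimecodeSourceEmbedded,
		InputClippings: []types.InputClipping{{
			// Keep the portion from five to six minutes into the video.
			StartTimecode: aws.String("01:05:00:00"),
			EndTimecode:   aws.String("01:06:00:00"),
		}},
	}
	_ = in
}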
ImageInserter *ImageInserter - // (InputClippings) contains sets of start and end times that together specify a - // portion of the input to be used in the outputs. If you provide only a start - // time, the clip will be the entire input from that point to the end. If you - // provide only an end time, it will be the entire input up to that point. When you - // specify more than one input clip, the transcoding service creates the job - // outputs by stringing the clips together in the order you specify them. + // Contains sets of start and end times that together specify a portion of the + // input to be used in the outputs. If you provide only a start time, the clip will + // be the entire input from that point to the end. If you provide only an end time, + // it will be the entire input up to that point. When you specify more than one + // input clip, the transcoding service creates the job outputs by stringing the + // clips together in the order you specify them. InputClippings []InputClipping // When you have a progressive segmented frame (PsF) input, use this setting to // flag the input as PsF. MediaConvert doesn't automatically detect PsF. Therefore, // flagging your input as PsF results in better preservation of video quality when // you do deinterlacing and frame rate conversion. If you don't specify, the - // default value is Auto (AUTO). Auto is the correct setting for all inputs that - // are not PsF. Don't set this value to PsF when your input is interlaced. Doing so - // creates horizontal interlacing artifacts. + // default value is Auto. Auto is the correct setting for all inputs that are not + // PsF. Don't set this value to PsF when your input is interlaced. Doing so creates + // horizontal interlacing artifacts. InputScanType InputScanType - // Use Selection placement (position) to define the video area in your output - // frame. The area outside of the rectangle that you specify here is black. If you - // specify a value here, it will override any value that you specify in the output - // setting Selection placement (position). If you specify a value here, this will - // override any AFD values in your input, even if you set Respond to AFD - // (RespondToAfd) to Respond (RESPOND). If you specify a value here, this will - // ignore anything that you specify for the setting Scaling Behavior - // (scalingBehavior). + // Use Selection placement to define the video area in your output frame. The area + // outside of the rectangle that you specify here is black. If you specify a value + // here, it will override any value that you specify in the output setting + // Selection placement. If you specify a value here, this will override any AFD + // values in your input, even if you set Respond to AFD to Respond. If you specify + // a value here, this will ignore anything that you specify for the setting Scaling + // Behavior. Position *Rectangle - // Use Program (programNumber) to select a specific program from within a - // multi-program transport stream. Note that Quad 4K is not currently supported. - // Default is the first program within the transport stream. If the program you - // specify doesn't exist, the transcoding service will use this default. + // Use Program to select a specific program from within a multi-program transport + // stream. Note that Quad 4K is not currently supported. Default is the first + // program within the transport stream. If the program you specify doesn't exist, + // the transcoding service will use this default. 
ProgramNumber int32 - // Set PSI control (InputPsiControl) for transport stream inputs to specify which - // data the demux process to scans. * Ignore PSI - Scan all PIDs for audio and - // video. * Use PSI - Scan only PSI data. + // Set PSI control for transport stream inputs to specify which data the demux + // process scans. + // - Ignore PSI - Scan all PIDs for audio and video. + // - Use PSI - Scan only PSI data. PsiControl InputPsiControl - // Use this Timecode source setting, located under the input settings - // (InputTimecodeSource), to specify how the service counts input video frames. - // This input frame count affects only the behavior of features that apply to a - // single input at a time, such as input clipping and synchronizing some captions - // formats. Choose Embedded (EMBEDDED) to use the timecodes in your input video. - // Choose Start at zero (ZEROBASED) to start the first frame at zero. Choose - // Specified start (SPECIFIEDSTART) to start the first frame at the timecode that - // you specify in the setting Start timecode (timecodeStart). If you don't specify - // a value for Timecode source, the service will use Embedded by default. For more - // information about timecodes, see - // https://docs.aws.amazon.com/console/mediaconvert/timecode. + // Use this Timecode source setting, located under the input settings, to specify + // how the service counts input video frames. This input frame count affects only + // the behavior of features that apply to a single input at a time, such as input + // clipping and synchronizing some captions formats. Choose Embedded to use the + // timecodes in your input video. Choose Start at zero to start the first frame at + // zero. Choose Specified start to start the first frame at the timecode that you + // specify in the setting Start timecode. If you don't specify a value for Timecode + // source, the service will use Embedded by default. For more information about + // timecodes, see https://docs.aws.amazon.com/console/mediaconvert/timecode. TimecodeSource InputTimecodeSource // Specify the timecode that you want the service to use for this input's initial // frame. To use this setting, you must set the Timecode source setting, located - // under the input settings (InputTimecodeSource), to Specified start - // (SPECIFIEDSTART). For more information about timecodes, see - // https://docs.aws.amazon.com/console/mediaconvert/timecode. + // under the input settings, to Specified start. For more information about + // timecodes, see https://docs.aws.amazon.com/console/mediaconvert/timecode. TimecodeStart *string // Input video selectors contain the video settings for the input. Each of your @@ -4986,8 +4756,8 @@ type InsertableImage struct { // Layer appear on top of images with lower values for Layer. Layer int32 - // Use Opacity (Opacity) to specify how much of the underlying video shows through - // the inserted image. 0 is transparent and 100 is fully opaque. Default is 50. + // Use Opacity to specify how much of the underlying video shows through the + // inserted image. 0 is transparent and 100 is fully opaque. Default is 50. Opacity int32 // Specify the timecode of the frame that you want the overlay to first appear on. @@ -5177,8 +4947,8 @@ type JobSettings struct { // Content Advisory. ExtendedDataServices *ExtendedDataServices - // Use Inputs (inputs) to define source file used in the transcode job. There can - // be multiple inputs add in a job.
These inputs will be concantenated together to + // Use Inputs to define the source file used in the transcode job. There can be + // multiple inputs added in a job. These inputs will be concatenated together to // create the output. Inputs []Input @@ -5197,12 +4967,8 @@ type JobSettings struct { MotionImageInserter *MotionImageInserter // Settings for your Nielsen configuration. If you don't do Nielsen measurement - // and analytics, ignore these settings. When you enable Nielsen configuration - // (nielsenConfiguration), MediaConvert enables PCM to ID3 tagging for all outputs - // in the job. To enable Nielsen configuration programmatically, include an - // instance of nielsenConfiguration in your JSON job specification. Even if you - // don't include any children of nielsenConfiguration, you still enable the - // setting. + // and analytics, ignore these settings. When you enable Nielsen configuration, + // MediaConvert enables PCM to ID3 tagging for all outputs in the job. NielsenConfiguration *NielsenConfiguration // Ignore these settings unless you are using Nielsen non-linear watermarking. @@ -5214,25 +4980,23 @@ type JobSettings struct { // Engine Version 1.2.7 Nielsen Watermark Authenticator [SID_TIC] Version [5.0.0] NielsenNonLinearWatermark *NielsenNonLinearWatermarkSettings - // (OutputGroups) contains one group of settings for each set of outputs that - // share a common package type. All unpackaged files (MPEG-4, MPEG-2 TS, Quicktime, - // MXF, and no container) are grouped in a single output group as well. Required in - // (OutputGroups) is a group of settings that apply to the whole group. This - // required object depends on the value you set for (Type) under - // (OutputGroups)>(OutputGroupSettings). Type, settings object pairs are as - // follows. * FILE_GROUP_SETTINGS, FileGroupSettings * HLS_GROUP_SETTINGS, - // HlsGroupSettings * DASH_ISO_GROUP_SETTINGS, DashIsoGroupSettings * - // MS_SMOOTH_GROUP_SETTINGS, MsSmoothGroupSettings * CMAF_GROUP_SETTINGS, - // CmafGroupSettings + // Contains one group of settings for each set of outputs that share a common + // package type. All unpackaged files (MPEG-4, MPEG-2 TS, Quicktime, MXF, and no + // container) are grouped in a single output group as well. Required in OutputGroups is a group + // of settings that apply to the whole group. This required object depends on the + // value you set for Type. Type, settings object pairs are as follows. * + // FILE_GROUP_SETTINGS, FileGroupSettings * HLS_GROUP_SETTINGS, HlsGroupSettings * + // DASH_ISO_GROUP_SETTINGS, DashIsoGroupSettings * MS_SMOOTH_GROUP_SETTINGS, + // MsSmoothGroupSettings * CMAF_GROUP_SETTINGS, CmafGroupSettings OutputGroups []OutputGroup // These settings control how the service handles timecodes throughout the job. // These settings don't affect input clipping. TimecodeConfig *TimecodeConfig - // Insert user-defined custom ID3 metadata (id3) at timecodes (timecode) that you - // specify. In each output that you want to include this metadata, you must set ID3 - // metadata (timedMetadata) to Passthrough (PASSTHROUGH). + // Insert user-defined custom ID3 metadata at timecodes that you specify. In each + // output that you want to include this metadata, you must set ID3 metadata to + // Passthrough. TimedMetadataInsertion *TimedMetadataInsertion noSmithyDocumentSerde @@ -5319,9 +5083,9 @@ type JobTemplateSettings struct { // Content Advisory. ExtendedDataServices *ExtendedDataServices - // Use Inputs (inputs) to define the source file used in the transcode job.
There - // can only be one input in a job template. Using the API, you can include multiple - // inputs when referencing a job template. + // Use Inputs to define the source file used in the transcode job. There can only + // be one input in a job template. Using the API, you can include multiple inputs + // when referencing a job template. Inputs []InputTemplate // Use these settings only when you use Kantar watermarking. Specify the values @@ -5339,12 +5103,8 @@ type JobTemplateSettings struct { MotionImageInserter *MotionImageInserter // Settings for your Nielsen configuration. If you don't do Nielsen measurement - // and analytics, ignore these settings. When you enable Nielsen configuration - // (nielsenConfiguration), MediaConvert enables PCM to ID3 tagging for all outputs - // in the job. To enable Nielsen configuration programmatically, include an - // instance of nielsenConfiguration in your JSON job specification. Even if you - // don't include any children of nielsenConfiguration, you still enable the - // setting. + // and analytics, ignore these settings. When you enable Nielsen configuration, + // MediaConvert enables PCM to ID3 tagging for all outputs in the job. NielsenConfiguration *NielsenConfiguration // Ignore these settings unless you are using Nielsen non-linear watermarking. @@ -5356,25 +5116,23 @@ type JobTemplateSettings struct { // Engine Version 1.2.7 Nielsen Watermark Authenticator [SID_TIC] Version [5.0.0] NielsenNonLinearWatermark *NielsenNonLinearWatermarkSettings - // (OutputGroups) contains one group of settings for each set of outputs that - // share a common package type. All unpackaged files (MPEG-4, MPEG-2 TS, Quicktime, - // MXF, and no container) are grouped in a single output group as well. Required in - // (OutputGroups) is a group of settings that apply to the whole group. This - // required object depends on the value you set for (Type) under - // (OutputGroups)>(OutputGroupSettings). Type, settings object pairs are as - // follows. * FILE_GROUP_SETTINGS, FileGroupSettings * HLS_GROUP_SETTINGS, - // HlsGroupSettings * DASH_ISO_GROUP_SETTINGS, DashIsoGroupSettings * - // MS_SMOOTH_GROUP_SETTINGS, MsSmoothGroupSettings * CMAF_GROUP_SETTINGS, - // CmafGroupSettings + // Contains one group of settings for each set of outputs that share a common + // package type. All unpackaged files (MPEG-4, MPEG-2 TS, Quicktime, MXF, and no + // container) are grouped in a single output group as well. Required in OutputGroups is a group + // of settings that apply to the whole group. This required object depends on the + // value you set for Type. Type, settings object pairs are as follows. * + // FILE_GROUP_SETTINGS, FileGroupSettings * HLS_GROUP_SETTINGS, HlsGroupSettings * + // DASH_ISO_GROUP_SETTINGS, DashIsoGroupSettings * MS_SMOOTH_GROUP_SETTINGS, + // MsSmoothGroupSettings * CMAF_GROUP_SETTINGS, CmafGroupSettings OutputGroups []OutputGroup // These settings control how the service handles timecodes throughout the job. // These settings don't affect input clipping. TimecodeConfig *TimecodeConfig - // Insert user-defined custom ID3 metadata (id3) at timecodes (timecode) that you - // specify. In each output that you want to include this metadata, you must set ID3 - // metadata (timedMetadata) to Passthrough (PASSTHROUGH). + // Insert user-defined custom ID3 metadata at timecodes that you specify. In each + // output that you want to include this metadata, you must set ID3 metadata to + // Passthrough.
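Taken together, the Inputs, OutputGroups, and ID3 metadata documentation above maps onto a job body like the following minimal Go sketch (aws-sdk-go-v2 mediaconvert types; the paths, ID3 tag placeholder, and timecode are illustrative). The Type/settings pairing is the part that trips people up: the settings object you populate must match the Type value.

package example

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/mediaconvert/types"
)

func exampleJobSettings() types.JobSettings {
	return types.JobSettings{
		Inputs: []types.Input{
			{FileInput: aws.String("s3://example-bucket/source.mp4")}, // illustrative path
		},
		OutputGroups: []types.OutputGroup{{
			OutputGroupSettings: &types.OutputGroupSettings{
				// The Type value selects which settings object is required:
				// FILE_GROUP_SETTINGS pairs with FileGroupSettings, and so on.
				Type: types.OutputGroupTypeFileGroupSettings,
				FileGroupSettings: &types.FileGroupSettings{
					Destination: aws.String("s3://example-bucket/outputs/"),
				},
			},
		}},
		// Only takes effect in outputs that set ID3 metadata to Passthrough.
		TimedMetadataInsertion: &types.TimedMetadataInsertion{
			Id3Insertions: []types.Id3Insertion{
				{Id3: aws.String("BASE64-ID3-TAG"), Timecode: aws.String("00:00:05:00")}, // illustrative values
			},
		},
	}
}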
TimedMetadataInsertion *TimedMetadataInsertion noSmithyDocumentSerde @@ -5455,7 +5213,7 @@ type KantarWatermarkSettings struct { // Settings for SCTE-35 signals from ESAM. Include this in your job settings to // put SCTE-35 markers in your HLS and transport stream outputs at the insertion // points that you specify in an ESAM XML document. Provide the document in the -// setting SCC XML (sccXml). +// setting SCC XML. type M2tsScte35Esam struct { // Packet Identifier (PID) of the SCTE-35 stream in the transport stream generated @@ -5466,15 +5224,14 @@ type M2tsScte35Esam struct { } // MPEG-2 TS container settings. These apply to outputs in a File output group -// when the output's container (ContainerType) is MPEG-2 Transport Stream (M2TS). -// In these assets, data is organized by the program map table (PMT). Each -// transport stream program contains subsets of data, including audio, video, and -// metadata. Each of these subsets of data has a numerical label called a packet -// identifier (PID). Each transport stream program corresponds to one MediaConvert -// output. The PMT lists the types of data in a program along with their PID. -// Downstream systems and players use the program map table to look up the PID for -// each type of data it accesses and then uses the PIDs to locate specific data -// within the asset. +// when the output's container is MPEG-2 Transport Stream (M2TS). In these assets, +// data is organized by the program map table (PMT). Each transport stream program +// contains subsets of data, including audio, video, and metadata. Each of these +// subsets of data has a numerical label called a packet identifier (PID). Each +// transport stream program corresponds to one MediaConvert output. The PMT lists +// the types of data in a program along with their PID. Downstream systems and +// players use the program map table to look up the PID for each type of data it +// accesses and then uses the PIDs to locate specific data within the asset. type M2tsSettings struct { // Selects between the DVB and ATSC buffer models for Dolby Digital audio. @@ -5482,17 +5239,16 @@ type M2tsSettings struct { // Specify this setting only when your output will be consumed by a downstream // repackaging workflow that is sensitive to very small duration differences - // between video and audio. For this situation, choose Match video duration - // (MATCH_VIDEO_DURATION). In all other cases, keep the default value, Default - // codec duration (DEFAULT_CODEC_DURATION). When you choose Match video duration, - // MediaConvert pads the output audio streams with silence or trims them to ensure - // that the total duration of each audio stream is at least as long as the total - // duration of the video stream. After padding or trimming, the audio stream - // duration is no more than one frame longer than the video stream. MediaConvert - // applies audio padding or trimming only to the end of the last segment of the - // output. For unsegmented outputs, MediaConvert adds padding only to the end of - // the file. When you keep the default value, any minor discrepancies between audio - // and video duration will depend on your output audio codec. + // between video and audio. For this situation, choose Match video duration. In all + // other cases, keep the default value, Default codec duration. 
When you choose + // Match video duration, MediaConvert pads the output audio streams with silence or + // trims them to ensure that the total duration of each audio stream is at least as + // long as the total duration of the video stream. After padding or trimming, the + // audio stream duration is no more than one frame longer than the video stream. + // MediaConvert applies audio padding or trimming only to the end of the last + // segment of the output. For unsegmented outputs, MediaConvert adds padding only + // to the end of the file. When you keep the default value, any minor discrepancies + // between audio and video duration will depend on your output audio codec. AudioDuration M2tsAudioDuration // The number of audio frames to insert for each PES packet. @@ -5517,19 +5273,15 @@ type M2tsSettings struct { // If you select ALIGN_TO_VIDEO, MediaConvert writes captions and data packets // with Presentation Timestamp (PTS) values greater than or equal to the first // video packet PTS (MediaConvert drops captions and data packets with lesser PTS - // values). Keep the default value (AUTO) to allow all PTS values. + // values). Keep the default value to allow all PTS values. DataPTSControl M2tsDataPtsControl // Use these settings to insert a DVB Network Information Table (NIT) in the - // transport stream of this output. When you work directly in your JSON job - // specification, include this object only when your job has a transport stream - // output and the container settings contain the object M2tsSettings. + // transport stream of this output. DvbNitSettings *DvbNitSettings // Use these settings to insert a DVB Service Description Table (SDT) in the - // transport stream of this output. When you work directly in your JSON job - // specification, include this object only when your job has a transport stream - // output and the container settings contain the object M2tsSettings. + // transport stream of this output. DvbSdtSettings *DvbSdtSettings // Specify the packet identifiers (PIDs) for DVB subtitle data included in this @@ -5537,9 +5289,7 @@ type M2tsSettings struct { DvbSubPids []int32 // Use these settings to insert a DVB Time and Date Table (TDT) in the transport - // stream of this output. When you work directly in your JSON job specification, - // include this object only when your job has a transport stream output and the - // container settings contain the object M2tsSettings. + // stream of this output. DvbTdtSettings *DvbTdtSettings // Specify the packet identifier (PID) for DVB teletext data you include in this @@ -5563,9 +5313,9 @@ type M2tsSettings struct { // Controls whether to include the ES Rate field in the PES header. EsRateInPes M2tsEsRateInPes - // Keep the default value (DEFAULT) unless you know that your audio EBP markers - // are incorrectly appearing before your video EBP markers. To correct this - // problem, set this value to Force (FORCE). + // Keep the default value unless you know that your audio EBP markers are + // incorrectly appearing before your video EBP markers. To correct this problem, + // set this value to Force. ForceTsVideoEbpOrder M2tsForceTsVideoEbpOrder // The length, in seconds, of each fragment. Only used with EBP markers. @@ -5610,7 +5360,7 @@ type M2tsSettings struct { // Specify the packet identifier (PID) for the program clock reference (PCR) in // this output. If you do not specify a value, the service will use the value for - // Video PID (VideoPid). + // Video PID. 
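A short Go sketch of the PCR/PID relationship described above for M2tsSettings; the PID numbers are arbitrary examples, and VideoPid/AudioPids are fields of the same struct that this hunk doesn't touch:

package example

import "github.com/aws/aws-sdk-go-v2/service/mediaconvert/types"

func exampleM2ts() types.M2tsSettings {
	return types.M2tsSettings{
		AudioDuration: types.M2tsAudioDurationDefaultCodecDuration, // keep the default behavior
		VideoPid:      481,                                         // arbitrary example PIDs
		AudioPids:     []int32{482, 483},
		PcrPid:        481, // if omitted, the service falls back to the Video PID
	}
}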
PcrPid int32 // Specify the number of milliseconds between instances of the program map table @@ -5625,10 +5375,9 @@ type M2tsSettings struct { // 503. PrivateMetadataPid int32 - // Use Program number (programNumber) to specify the program number used in the - // program map table (PMT) for this output. Default is 1. Program numbers and - // program map tables are parts of MPEG-2 transport stream containers, used for - // organizing data. + // Use Program number to specify the program number used in the program map table + // (PMT) for this output. Default is 1. Program numbers and program map tables are + // parts of MPEG-2 transport stream containers, used for organizing data. ProgramNumber int32 // When set to CBR, inserts null packets into transport stream to fill specified @@ -5638,19 +5387,19 @@ type M2tsSettings struct { // Include this in your job settings to put SCTE-35 markers in your HLS and // transport stream outputs at the insertion points that you specify in an ESAM XML - // document. Provide the document in the setting SCC XML (sccXml). + // document. Provide the document in the setting SCC XML. Scte35Esam *M2tsScte35Esam // Specify the packet identifier (PID) of the SCTE-35 stream in the transport // stream. Scte35Pid int32 - // For SCTE-35 markers from your input-- Choose Passthrough (PASSTHROUGH) if you - // want SCTE-35 markers that appear in your input to also appear in this output. - // Choose None (NONE) if you don't want SCTE-35 markers in this output. For SCTE-35 - // markers from an ESAM XML document-- Choose None (NONE). Also provide the ESAM - // XML as a string in the setting Signal processing notification XML (sccXml). Also - // enable ESAM SCTE-35 (include the property scte35Esam). + // For SCTE-35 markers from your input-- Choose Passthrough if you want SCTE-35 + // markers that appear in your input to also appear in this output. Choose None if + // you don't want SCTE-35 markers in this output. For SCTE-35 markers from an ESAM + // XML document-- Choose None. Also provide the ESAM XML as a string in the setting + // Signal processing notification XML. Also enable ESAM SCTE-35 (include the + // property scte35Esam). Scte35Source M2tsScte35Source // Inserts segmentation markers at each segmentation_time period. rai_segstart @@ -5700,17 +5449,16 @@ type M3u8Settings struct { // Specify this setting only when your output will be consumed by a downstream // repackaging workflow that is sensitive to very small duration differences - // between video and audio. For this situation, choose Match video duration - // (MATCH_VIDEO_DURATION). In all other cases, keep the default value, Default - // codec duration (DEFAULT_CODEC_DURATION). When you choose Match video duration, - // MediaConvert pads the output audio streams with silence or trims them to ensure - // that the total duration of each audio stream is at least as long as the total - // duration of the video stream. After padding or trimming, the audio stream - // duration is no more than one frame longer than the video stream. MediaConvert - // applies audio padding or trimming only to the end of the last segment of the - // output. For unsegmented outputs, MediaConvert adds padding only to the end of - // the file. When you keep the default value, any minor discrepancies between audio - // and video duration will depend on your output audio codec. + // between video and audio. For this situation, choose Match video duration. In all + // other cases, keep the default value, Default codec duration. 
When you choose + // Match video duration, MediaConvert pads the output audio streams with silence or + // trims them to ensure that the total duration of each audio stream is at least as + // long as the total duration of the video stream. After padding or trimming, the + // audio stream duration is no more than one frame longer than the video stream. + // MediaConvert applies audio padding or trimming only to the end of the last + // segment of the output. For unsegmented outputs, MediaConvert adds padding only + // to the end of the file. When you keep the default value, any minor discrepancies + // between audio and video duration will depend on your output audio codec. AudioDuration M3u8AudioDuration // The number of audio frames to insert for each PES packet. @@ -5724,7 +5472,7 @@ type M3u8Settings struct { // If you select ALIGN_TO_VIDEO, MediaConvert writes captions and data packets // with Presentation Timestamp (PTS) values greater than or equal to the first // video packet PTS (MediaConvert drops captions and data packets with lesser PTS - // values). Keep the default value (AUTO) to allow all PTS values. + // values). Keep the default value AUTO to allow all PTS values. DataPTSControl M3u8DataPtsControl // Specify the maximum time, in milliseconds, between Program Clock References @@ -5765,21 +5513,19 @@ type M3u8Settings struct { // Packet Identifier (PID) of the SCTE-35 stream in the transport stream. Scte35Pid int32 - // For SCTE-35 markers from your input-- Choose Passthrough (PASSTHROUGH) if you - // want SCTE-35 markers that appear in your input to also appear in this output. - // Choose None (NONE) if you don't want SCTE-35 markers in this output. For SCTE-35 - // markers from an ESAM XML document-- Choose None (NONE) if you don't want - // manifest conditioning. Choose Passthrough (PASSTHROUGH) and choose Ad markers - // (adMarkers) if you do want manifest conditioning. In both cases, also provide - // the ESAM XML as a string in the setting Signal processing notification XML - // (sccXml). + // For SCTE-35 markers from your input-- Choose Passthrough if you want SCTE-35 + // markers that appear in your input to also appear in this output. Choose None if + // you don't want SCTE-35 markers in this output. For SCTE-35 markers from an ESAM + // XML document-- Choose None if you don't want manifest conditioning. Choose + // Passthrough and choose Ad markers if you do want manifest conditioning. In both + // cases, also provide the ESAM XML as a string in the setting Signal processing + // notification XML. Scte35Source M3u8Scte35Source - // Set ID3 metadata (timedMetadata) to Passthrough (PASSTHROUGH) to include ID3 - // metadata in this output. This includes ID3 metadata from the following features: - // ID3 timestamp period (timedMetadataId3Period), and Custom ID3 metadata inserter - // (timedMetadataInsertion). To exclude this ID3 metadata in this output: set ID3 - // metadata to None (NONE) or leave blank. + // Set ID3 metadata to Passthrough to include ID3 metadata in this output. This + // includes ID3 metadata from the following features: ID3 timestamp period, and + // Custom ID3 metadata inserter. To exclude this ID3 metadata from this output: set + // ID3 metadata to None or leave blank. TimedMetadata TimedMetadata // Packet Identifier (PID) of the ID3 metadata stream in the transport stream.
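The SCTE-35 and ID3 interplay documented above, as a minimal Go sketch; setting TimedMetadata to PASSTHROUGH is what lets the job-level Custom ID3 metadata inserter reach this output:

package example

import "github.com/aws/aws-sdk-go-v2/service/mediaconvert/types"

func exampleM3u8() types.M3u8Settings {
	return types.M3u8Settings{
		// Carry SCTE-35 markers from the input through to this output.
		Scte35Source: types.M3u8Scte35SourcePassthrough,
		// Include ID3 metadata from the timed-metadata features in this output.
		TimedMetadata: types.TimedMetadataPassthrough,
	}
}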
@@ -5879,10 +5625,7 @@ type MotionImageInserter struct { // need to set up your job to follow timecodes embedded in your source that don't // start at zero, make sure that you specify a start time that is after the first // embedded timecode. For more information, see - // https://docs.aws.amazon.com/mediaconvert/latest/ug/setting-up-timecode.html Find - // job-wide and input timecode configuration settings in your JSON job settings - // specification at settings>timecodeConfig>source and - // settings>inputs>timecodeSource. + // https://docs.aws.amazon.com/mediaconvert/latest/ug/setting-up-timecode.html StartTime *string noSmithyDocumentSerde @@ -5951,16 +5694,15 @@ type MovSettings struct { noSmithyDocumentSerde } -// Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to the -// value MP2. +// Required when you set Codec to the value MP2. type Mp2Settings struct { // Specify the average bitrate in bits per second. Bitrate int32 // Set Channels to specify the number of channels in this output audio track. - // Choosing Mono in the console will give you 1 output channel; choosing Stereo - // will give you 2. In the API, valid values are 1 and 2. + // Choosing Mono will give you 1 output channel; choosing Stereo will give you + // 2. In the API, valid values are 1 and 2. Channels int32 // Sample rate in hz. @@ -5976,9 +5718,9 @@ type Mp3Settings struct { // Specify the average bitrate in bits per second. Bitrate int32 - // Specify the number of channels in this output audio track. Choosing Mono on the - // console gives you 1 output channel; choosing Stereo gives you 2. In the API, - // valid values are 1 and 2. + // Specify the number of channels in this output audio track. Choosing Mono gives + // you 1 output channel; choosing Stereo gives you 2. In the API, valid values are + // 1 and 2. Channels int32 // Specify whether the service encodes this MP3 audio output with a constant @@ -5988,9 +5730,8 @@ type Mp3Settings struct { // Sample rate in hz. SampleRate int32 - // Required when you set Bitrate control mode (rateControlMode) to VBR. Specify - // the audio quality of this MP3 output from 0 (highest quality) to 9 (lowest - // quality). + // Required when you set Bitrate control mode to VBR. Specify the audio quality of + // this MP3 output from 0 (highest quality) to 9 (lowest quality). VbrQuality int32 noSmithyDocumentSerde @@ -6003,17 +5744,16 @@ type Mp4Settings struct { // Specify this setting only when your output will be consumed by a downstream // repackaging workflow that is sensitive to very small duration differences - // between video and audio.
For this situation, choose Match video duration. In all + // other cases, keep the default value, Default codec duration. When you choose + // Match video duration, MediaConvert pads the output audio streams with silence or + // trims them to ensure that the total duration of each audio stream is at least as + // long as the total duration of the video stream. After padding or trimming, the + // audio stream duration is no more than one frame longer than the video stream. + // MediaConvert applies audio padding or trimming only to the end of the last + // segment of the output. For unsegmented outputs, MediaConvert adds padding only + // to the end of the file. When you keep the default value, any minor discrepancies + // between audio and video duration will depend on your output audio codec. AudioDuration CmfcAudioDuration // When enabled, file composition times will start at zero, composition times in @@ -6025,9 +5765,9 @@ type Mp4Settings struct { // Ignore this setting unless compliance to the CTTS box version specification // matters in your workflow. Specify a value of 1 to set your CTTS box version to 1 // and make your output compliant with the specification. When you specify a value - // of 1, you must also set CSLG atom (cslgAtom) to the value INCLUDE. Keep the - // default value 0 to set your CTTS box version to 0. This can provide backward - // compatibility for some players and packagers. + // of 1, you must also set CSLG atom to the value INCLUDE. Keep the default value 0 + // to set your CTTS box version to 0. This can provide backward compatibility for + // some players and packagers. CttsVersion int32 // Inserts a free-space box immediately after the moov box. @@ -6049,35 +5789,33 @@ type Mp4Settings struct { // DASH outputs. type MpdSettings struct { - // Optional. Choose Include (INCLUDE) to have MediaConvert mark up your DASH - // manifest with elements for embedded 608 captions. This markup isn't generally - // required, but some video players require it to discover and play embedded 608 - // captions. Keep the default value, Exclude (EXCLUDE), to leave these elements - // out. When you enable this setting, this is the markup that MediaConvert includes - // in your manifest: + // Optional. Choose Include to have MediaConvert mark up your DASH manifest with + // elements for embedded 608 captions. This markup isn't generally required, but + // some video players require it to discover and play embedded 608 captions. Keep + // the default value, Exclude, to leave these elements out. When you enable this + // setting, this is the markup that MediaConvert includes in your manifest: AccessibilityCaptionHints MpdAccessibilityCaptionHints // Specify this setting only when your output will be consumed by a downstream // repackaging workflow that is sensitive to very small duration differences - // between video and audio. For this situation, choose Match video duration - // (MATCH_VIDEO_DURATION). In all other cases, keep the default value, Default - // codec duration (DEFAULT_CODEC_DURATION). When you choose Match video duration, - // MediaConvert pads the output audio streams with silence or trims them to ensure - // that the total duration of each audio stream is at least as long as the total - // duration of the video stream. After padding or trimming, the audio stream - // duration is no more than one frame longer than the video stream. MediaConvert - // applies audio padding or trimming only to the end of the last segment of the - // output. 
For unsegmented outputs, MediaConvert adds padding only to the end of - // the file. When you keep the default value, any minor discrepancies between audio - // and video duration will depend on your output audio codec. + // between video and audio. For this situation, choose Match video duration. In all + // other cases, keep the default value, Default codec duration. When you choose + // Match video duration, MediaConvert pads the output audio streams with silence or + // trims them to ensure that the total duration of each audio stream is at least as + // long as the total duration of the video stream. After padding or trimming, the + // audio stream duration is no more than one frame longer than the video stream. + // MediaConvert applies audio padding or trimming only to the end of the last + // segment of the output. For unsegmented outputs, MediaConvert adds padding only + // to the end of the file. When you keep the default value, any minor discrepancies + // between audio and video duration will depend on your output audio codec. AudioDuration MpdAudioDuration // Use this setting only in DASH output groups that include sidecar TTML or IMSC // captions. You specify sidecar captions in a separate output from your audio and - // video. Choose Raw (RAW) for captions in a single XML file in a raw container. - // Choose Fragmented MPEG-4 (FRAGMENTED_MP4) for captions in XML format contained - // within fragmented MP4 files. This set of fragmented MP4 files is separate from - // your video and audio fragmented MP4 files. + // video. Choose Raw for captions in a single XML file in a raw container. Choose + // Fragmented MPEG-4 for captions in XML format contained within fragmented MP4 + // files. This set of fragmented MP4 files is separate from your video and audio + // fragmented MP4 files. CaptionContainerType MpdCaptionContainerType // To include key-length-value metadata in this output: Set KLV metadata insertion @@ -6094,58 +5832,55 @@ type MpdSettings struct { // InbandEventStream element schemeIdUri will be "urn:scte:scte35:2013:bin". To // leave these elements out of your output MPD manifest, set Manifest metadata // signaling to Disabled. To enable Manifest metadata signaling, you must also set - // SCTE-35 source to Passthrough, ESAM SCTE-35 to insert, or ID3 metadata - // (TimedMetadata) to Passthrough. + // SCTE-35 source to Passthrough, ESAM SCTE-35 to insert, or ID3 metadata to + // Passthrough. ManifestMetadataSignaling MpdManifestMetadataSignaling // Use this setting only when you specify SCTE-35 markers from ESAM. Choose INSERT // to put SCTE-35 markers in this output at the insertion points that you specify - // in an ESAM XML document. Provide the document in the setting SCC XML (sccXml). + // in an ESAM XML document. Provide the document in the setting SCC XML. Scte35Esam MpdScte35Esam // Ignore this setting unless you have SCTE-35 markers in your input video file. - // Choose Passthrough (PASSTHROUGH) if you want SCTE-35 markers that appear in your - // input to also appear in this output. Choose None (NONE) if you don't want those - // SCTE-35 markers in this output. + // Choose Passthrough if you want SCTE-35 markers that appear in your input to also + // appear in this output. Choose None if you don't want those SCTE-35 markers in + // this output. Scte35Source MpdScte35Source - // To include ID3 metadata in this output: Set ID3 metadata (timedMetadata) to - // Passthrough (PASSTHROUGH). 
Specify this ID3 metadata in Custom ID3 metadata - // inserter (timedMetadataInsertion). MediaConvert writes each instance of ID3 - // metadata in a separate Event Message (eMSG) box. To exclude this ID3 metadata: - // Set ID3 metadata to None (NONE) or leave blank. + // To include ID3 metadata in this output: Set ID3 metadata to Passthrough. + // Specify this ID3 metadata in Custom ID3 metadata inserter. MediaConvert writes + // each instance of ID3 metadata in a separate Event Message (eMSG) box. To exclude + // this ID3 metadata: Set ID3 metadata to None or leave blank. TimedMetadata MpdTimedMetadata // Specify the event message box (eMSG) version for ID3 timed metadata in your // output. For more information, see ISO/IEC 23009-1:2022 section 5.10.3.3.3 // Syntax. Leave blank to use the default value Version 0. When you specify Version - // 1, you must also set ID3 metadata (timedMetadata) to Passthrough. + // 1, you must also set ID3 metadata to Passthrough. TimedMetadataBoxVersion MpdTimedMetadataBoxVersion - // Specify the event message box (eMSG) scheme ID URI (scheme_id_uri) for ID3 - // timed metadata in your output. For more information, see ISO/IEC 23009-1:2022 - // section 5.10.3.3.4 Semantics. Leave blank to use the default value: - // https://aomedia.org/emsg/ID3 When you specify a value for ID3 metadata scheme ID - // URI, you must also set ID3 metadata (timedMetadata) to Passthrough. + // Specify the event message box (eMSG) scheme ID URI for ID3 timed metadata in + // your output. For more information, see ISO/IEC 23009-1:2022 section 5.10.3.3.4 + // Semantics. Leave blank to use the default value: https://aomedia.org/emsg/ID3 + // When you specify a value for ID3 metadata scheme ID URI, you must also set ID3 + // metadata to Passthrough. TimedMetadataSchemeIdUri *string // Specify the event message box (eMSG) value for ID3 timed metadata in your // output. For more information, see ISO/IEC 23009-1:2022 section 5.10.3.3.4 // Semantics. When you specify a value for ID3 Metadata Value, you must also set - // ID3 metadata (timedMetadata) to Passthrough. + // ID3 metadata to Passthrough. TimedMetadataValue *string noSmithyDocumentSerde } -// Required when you set (Codec) under (VideoDescription)>(CodecSettings) to the -// value MPEG2. +// Required when you set Codec to the value MPEG2. type Mpeg2Settings struct { // Specify the strength of any adaptive quantization filters that you enable. The // value that you choose here applies to the following settings: Spatial adaptive - // quantization (spatialAdaptiveQuantization), and Temporal adaptive quantization - // (temporalAdaptiveQuantization). + // quantization, and Temporal adaptive quantization. AdaptiveQuantization Mpeg2AdaptiveQuantization // Specify the average bitrate in bits per second. Required for VBR and CBR. For @@ -6153,18 +5888,17 @@ type Mpeg2Settings struct { // multiple of 1000. Bitrate int32 - // Use Level (Mpeg2CodecLevel) to set the MPEG-2 level for the video output. + // Use Level to set the MPEG-2 level for the video output. CodecLevel Mpeg2CodecLevel - // Use Profile (Mpeg2CodecProfile) to set the MPEG-2 profile for the video output. + // Use Profile to set the MPEG-2 profile for the video output. CodecProfile Mpeg2CodecProfile // Choose Adaptive to improve subjective video quality for high-motion content. // This will cause the service to use fewer B-frames (which infer information based // on other frames) for high-motion portions of the video and more B-frames for // low-motion portions. 
The maximum number of B-frames is limited by the value you - // provide for the setting B frames between reference frames - // (numberBFramesBetweenReferenceFrames). + // provide for the setting B frames between reference frames. DynamicSubGop Mpeg2DynamicSubGop // If you are using the console, use the Framerate setting to specify the frame @@ -6172,12 +5906,7 @@ type Mpeg2Settings struct { // video, choose Follow source. If you want to do frame rate conversion, choose a // frame rate from the dropdown list or choose Custom. The framerates shown in the // dropdown list are decimal approximations of fractions. If you choose Custom, - // specify your frame rate as a fraction. If you are creating your transcoding job - // specification as a JSON file without the console, use FramerateControl to - // specify which value the service uses for the frame rate for this output. Choose - // INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the - // input. Choose SPECIFIED if you want the service to use the frame rate you - // specify in the settings FramerateNumerator and FramerateDenominator. + // specify your frame rate as a fraction. FramerateControl Mpeg2FramerateControl // Choose the method that you want MediaConvert to use when increasing or @@ -6219,12 +5948,12 @@ type Mpeg2Settings struct { // Specify the interval between keyframes, in seconds or frames, for this output. // Default: 12 Related settings: When you specify the GOP size in seconds, set GOP - // mode control (GopSizeUnits) to Specified, seconds (SECONDS). The default value - // for GOP mode control (GopSizeUnits) is Frames (FRAMES). + // mode control to Specified, seconds. The default value for GOP mode control is + // Frames. GopSize float64 - // Specify the units for GOP size (GopSize). If you don't specify a value here, by - // default the encoder measures GOP size in frames. + // Specify the units for GOP size. If you don't specify a value here, by default + // the encoder measures GOP size in frames. GopSizeUnits Mpeg2GopSizeUnits // If your downstream systems have strict buffer requirements: Specify the minimum @@ -6241,40 +5970,38 @@ type Mpeg2Settings struct { HrdBufferSize int32 // Choose the scan line type for the output. Keep the default value, Progressive - // (PROGRESSIVE) to create a progressive output, regardless of the scan type of - // your input. Use Top field first (TOP_FIELD) or Bottom field first (BOTTOM_FIELD) - // to create an output that's interlaced with the same field polarity throughout. - // Use Follow, default top (FOLLOW_TOP_FIELD) or Follow, default bottom - // (FOLLOW_BOTTOM_FIELD) to produce outputs with the same field polarity as the - // source. For jobs that have multiple inputs, the output field polarity might - // change over the course of the output. Follow behavior depends on the input scan - // type. If the source is interlaced, the output will be interlaced with the same - // polarity as the source. If the source is progressive, the output will be - // interlaced with top field bottom field first, depending on which of the Follow - // options you choose. + // to create a progressive output, regardless of the scan type of your input. Use + // Top field first or Bottom field first to create an output that's interlaced with + // the same field polarity throughout. Use Follow, default top or Follow, default + // bottom to produce outputs with the same field polarity as the source. 
For jobs + // that have multiple inputs, the output field polarity might change over the + // course of the output. Follow behavior depends on the input scan type. If the + // source is interlaced, the output will be interlaced with the same polarity as + // the source. If the source is progressive, the output will be interlaced with top + // field or bottom field first, depending on which of the Follow options you choose. InterlaceMode Mpeg2InterlaceMode - // Use Intra DC precision (Mpeg2IntraDcPrecision) to set quantization precision - // for intra-block DC coefficients. If you choose the value auto, the service will - // automatically select the precision based on the per-frame compression ratio. + // Use Intra DC precision to set quantization precision for intra-block DC + // coefficients. If you choose the value auto, the service will automatically + // select the precision based on the per-frame compression ratio. IntraDcPrecision Mpeg2IntraDcPrecision // Maximum bitrate in bits/second. For example, enter five megabits per second as // 5000000. MaxBitrate int32 - // Use this setting only when you also enable Scene change detection - // (SceneChangeDetect). This setting determines how the encoder manages the spacing - // between I-frames that it inserts as part of the I-frame cadence and the I-frames - // that it inserts for Scene change detection. When you specify a value for this - // setting, the encoder determines whether to skip a cadence-driven I-frame by the - // value you set. For example, if you set Min I interval (minIInterval) to 5 and a - // cadence-driven I-frame would fall within 5 frames of a scene-change I-frame, - // then the encoder skips the cadence-driven I-frame. In this way, one GOP is - // shrunk slightly and one GOP is stretched slightly. When the cadence-driven - // I-frames are farther from the scene-change I-frame than the value you set, then - // the encoder leaves all I-frames in place and the GOPs surrounding the scene - // change are smaller than the usual cadence GOPs. + // Use this setting only when you also enable Scene change detection. This setting + // determines how the encoder manages the spacing between I-frames that it inserts + // as part of the I-frame cadence and the I-frames that it inserts for Scene change + // detection. When you specify a value for this setting, the encoder determines + // whether to skip a cadence-driven I-frame by the value you set. For example, if + // you set Min I interval to 5 and a cadence-driven I-frame would fall within 5 + // frames of a scene-change I-frame, then the encoder skips the cadence-driven + // I-frame. In this way, one GOP is shrunk slightly and one GOP is stretched + // slightly. When the cadence-driven I-frames are farther from the scene-change + // I-frame than the value you set, then the encoder leaves all I-frames in place + // and the GOPs surrounding the scene change are smaller than the usual cadence + // GOPs. MinIInterval int32 // Specify the number of B-frames that MediaConvert puts between reference frames @@ -6283,52 +6010,47 @@ type Mpeg2Settings struct { NumberBFramesBetweenReferenceFrames int32 // Optional. Specify how the service determines the pixel aspect ratio (PAR) for - // this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE), uses - // the PAR from your input video for your output. To specify a different PAR in the - // console, choose any value other than Follow source. To specify a different PAR - // by editing the JSON job specification, choose SPECIFIED.
When you choose - // SPECIFIED for this setting, you must also specify values for the parNumerator - // and parDenominator settings. + // this output. The default behavior, Follow source, uses the PAR from your input + // video for your output. To specify a different PAR in the console, choose any + // value other than Follow source. When you choose SPECIFIED for this setting, you + // must also specify values for the parNumerator and parDenominator settings. ParControl Mpeg2ParControl - // Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the - // console, this corresponds to any value other than Follow source. When you - // specify an output pixel aspect ratio (PAR) that is different from your input - // video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC - // widescreen, you would specify the ratio 40:33. In this example, the value for - // parDenominator is 33. + // Required when you set Pixel aspect ratio to SPECIFIED. On the console, this + // corresponds to any value other than Follow source. When you specify an output + // pixel aspect ratio (PAR) that is different from your input video PAR, provide + // your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would + // specify the ratio 40:33. In this example, the value for parDenominator is 33. ParDenominator int32 - // Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the - // console, this corresponds to any value other than Follow source. When you - // specify an output pixel aspect ratio (PAR) that is different from your input - // video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC - // widescreen, you would specify the ratio 40:33. In this example, the value for - // parNumerator is 40. + // Required when you set Pixel aspect ratio to SPECIFIED. On the console, this + // corresponds to any value other than Follow source. When you specify an output + // pixel aspect ratio (PAR) that is different from your input video PAR, provide + // your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would + // specify the ratio 40:33. In this example, the value for parNumerator is 40. ParNumerator int32 - // Optional. Use Quality tuning level (qualityTuningLevel) to choose how you want - // to trade off encoding speed for output video quality. The default behavior is - // faster, lower quality, single-pass encoding. + // Optional. Use Quality tuning level to choose how you want to trade off encoding + // speed for output video quality. The default behavior is faster, lower quality, + // single-pass encoding. QualityTuningLevel Mpeg2QualityTuningLevel - // Use Rate control mode (Mpeg2RateControlMode) to specify whether the bitrate is - // variable (vbr) or constant (cbr). + // Use Rate control mode to specify whether the bitrate is variable (vbr) or + // constant (cbr). RateControlMode Mpeg2RateControlMode // Use this setting for interlaced outputs, when your output frame rate is half of - // your input frame rate. In this situation, choose Optimized interlacing - // (INTERLACED_OPTIMIZE) to create a better quality interlaced output. In this - // case, each progressive frame from the input corresponds to an interlaced field - // in the output. Keep the default value, Basic interlacing (INTERLACED), for all - // other output frame rates. With basic interlacing, MediaConvert performs any - // frame rate conversion first and then interlaces the frames. 
When you choose - // Optimized interlacing and you set your output frame rate to a value that isn't - // suitable for optimized interlacing, MediaConvert automatically falls back to - // basic interlacing. Required settings: To use optimized interlacing, you must set - // Telecine (telecine) to None (NONE) or Soft (SOFT). You can't use optimized - // interlacing for hard telecine outputs. You must also set Interlace mode - // (interlaceMode) to a value other than Progressive (PROGRESSIVE). + // your input frame rate. In this situation, choose Optimized interlacing to create + // a better quality interlaced output. In this case, each progressive frame from + // the input corresponds to an interlaced field in the output. Keep the default + // value, Basic interlacing, for all other output frame rates. With basic + // interlacing, MediaConvert performs any frame rate conversion first and then + // interlaces the frames. When you choose Optimized interlacing and you set your + // output frame rate to a value that isn't suitable for optimized interlacing, + // MediaConvert automatically falls back to basic interlacing. Required settings: + // To use optimized interlacing, you must set Telecine to None or Soft. You can't + // use optimized interlacing for hard telecine outputs. You must also set Interlace + // mode to a value other than Progressive. ScanTypeConversionMode Mpeg2ScanTypeConversionMode // Enable this setting to insert I-frames at scene changes that the service @@ -6340,65 +6062,62 @@ type Mpeg2Settings struct { // PAL, MediaConvert relabels the video frames to 25 fps and resamples your audio // to keep it synchronized with the video. Note that enabling this setting will // slightly reduce the duration of your video. Required settings: You must also set - // Framerate to 25. In your JSON job specification, set (framerateControl) to - // (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to 1. + // Framerate to 25. SlowPal Mpeg2SlowPal // Ignore this setting unless you need to comply with a specification that // requires a specific value. If you don't have a specification requirement, we // recommend that you adjust the softness of your output by using a lower value for - // the setting Sharpness (sharpness) or by enabling a noise reducer filter - // (noiseReducerFilter). The Softness (softness) setting specifies the quantization - // matrices that the encoder uses. Keep the default value, 0, to use the AWS - // Elemental default matrices. Choose a value from 17 to 128 to use planar - // interpolation. Increasing values from 17 to 128 result in increasing reduction - // of high-frequency data. The value 128 results in the softest video. + // the setting Sharpness or by enabling a noise reducer filter. The Softness + // setting specifies the quantization matrices that the encoder uses. Keep the + // default value, 0, to use the AWS Elemental default matrices. Choose a value from + // 17 to 128 to use planar interpolation. Increasing values from 17 to 128 result + // in increasing reduction of high-frequency data. The value 128 results in the + // softest video. Softness int32 - // Keep the default value, Enabled (ENABLED), to adjust quantization within each - // frame based on spatial variation of content complexity. When you enable this - // feature, the encoder uses fewer bits on areas that can sustain more distortion - // with no noticeable visual degradation and uses more bits on areas where any - // small distortion will be noticeable. 
For example, complex textured blocks are - // encoded with fewer bits and smooth textured blocks are encoded with more bits. - // Enabling this feature will almost always improve your video quality. Note, - // though, that this feature doesn't take into account where the viewer's attention - // is likely to be. If viewers are likely to be focusing their attention on a part - // of the screen with a lot of complex texture, you might choose to disable this - // feature. Related setting: When you enable spatial adaptive quantization, set the - // value for Adaptive quantization (adaptiveQuantization) depending on your - // content. For homogeneous content, such as cartoons and video games, set it to - // Low. For content with a wider variety of textures, set it to High or Higher. + // Keep the default value, Enabled, to adjust quantization within each frame based + // on spatial variation of content complexity. When you enable this feature, the + // encoder uses fewer bits on areas that can sustain more distortion with no + // noticeable visual degradation and uses more bits on areas where any small + // distortion will be noticeable. For example, complex textured blocks are encoded + // with fewer bits and smooth textured blocks are encoded with more bits. Enabling + // this feature will almost always improve your video quality. Note, though, that + // this feature doesn't take into account where the viewer's attention is likely to + // be. If viewers are likely to be focusing their attention on a part of the screen + // with a lot of complex texture, you might choose to disable this feature. Related + // setting: When you enable spatial adaptive quantization, set the value for + // Adaptive quantization depending on your content. For homogeneous content, such + // as cartoons and video games, set it to Low. For content with a wider variety of + // textures, set it to High or Higher. SpatialAdaptiveQuantization Mpeg2SpatialAdaptiveQuantization // Specify whether this output's video uses the D10 syntax. Keep the default value - // to not use the syntax. Related settings: When you choose D10 (D_10) for your MXF - // profile (profile), you must also set this value to D10 (D_10). + // to not use the syntax. Related settings: When you choose D10 for your MXF + // profile, you must also set this value to D10. Syntax Mpeg2Syntax // When you do frame rate conversion from 23.976 frames per second (fps) to 29.97 // fps, and your output scan type is interlaced, you can optionally enable hard or - // soft telecine to create a smoother picture. Hard telecine (HARD) produces a - // 29.97i output. Soft telecine (SOFT) produces an output with a 23.976 output that - // signals to the video player device to do the conversion during play back. When - // you keep the default value, None (NONE), MediaConvert does a standard frame rate - // conversion to 29.97 without doing anything with the field polarity to create a - // smoother picture. + // soft telecine to create a smoother picture. Hard telecine produces a 29.97i + // output. Soft telecine produces a 23.976 output that signals to + // the video player device to do the conversion during playback. When you keep the + // default value, None, MediaConvert does a standard frame rate conversion to 29.97 + // without doing anything with the field polarity to create a smoother picture.
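A hedged Go sketch pulling together several of the Mpeg2Settings fields documented above (rate control, the 40:33 PAR example from the doc text, interlacing, and soft telecine); the bitrate is an arbitrary example, not a recommendation:

package example

import "github.com/aws/aws-sdk-go-v2/service/mediaconvert/types"

func exampleMpeg2() types.Mpeg2Settings {
	return types.Mpeg2Settings{
		RateControlMode: types.Mpeg2RateControlModeCbr,
		Bitrate:         5000000, // arbitrary example, in bits per second
		// The D1/DV NTSC widescreen PAR described above: 40:33.
		ParControl:     types.Mpeg2ParControlSpecified,
		ParNumerator:   40,
		ParDenominator: 33,
		// Interlaced output with soft telecine, per the Telecine doc above.
		InterlaceMode: types.Mpeg2InterlaceModeTopField,
		Telecine:      types.Mpeg2TelecineSoft,
	}
}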
Telecine Mpeg2Telecine - // Keep the default value, Enabled (ENABLED), to adjust quantization within each - // frame based on temporal variation of content complexity. When you enable this - // feature, the encoder uses fewer bits on areas of the frame that aren't moving - // and uses more bits on complex objects with sharp edges that move a lot. For - // example, this feature improves the readability of text tickers on newscasts and - // scoreboards on sports matches. Enabling this feature will almost always improve - // your video quality. Note, though, that this feature doesn't take into account - // where the viewer's attention is likely to be. If viewers are likely to be - // focusing their attention on a part of the screen that doesn't have moving - // objects with sharp edges, such as sports athletes' faces, you might choose to - // disable this feature. Related setting: When you enable temporal quantization, - // adjust the strength of the filter with the setting Adaptive quantization - // (adaptiveQuantization). + // Keep the default value, Enabled, to adjust quantization within each frame based + // on temporal variation of content complexity. When you enable this feature, the + // encoder uses fewer bits on areas of the frame that aren't moving and uses more + // bits on complex objects with sharp edges that move a lot. For example, this + // feature improves the readability of text tickers on newscasts and scoreboards on + // sports matches. Enabling this feature will almost always improve your video + // quality. Note, though, that this feature doesn't take into account where the + // viewer's attention is likely to be. If viewers are likely to be focusing their + // attention on a part of the screen that doesn't have moving objects with sharp + // edges, such as sports athletes' faces, you might choose to disable this feature. + // Related setting: When you enable temporal quantization, adjust the strength of + // the filter with the setting Adaptive quantization. TemporalAdaptiveQuantization Mpeg2TemporalAdaptiveQuantization noSmithyDocumentSerde @@ -6424,8 +6143,7 @@ type MsSmoothAdditionalManifest struct { noSmithyDocumentSerde } -// If you are using DRM, set DRM System (MsSmoothEncryptionSettings) to specify -// the value SpekeKeyProvider. +// If you are using DRM, set DRM System to specify the value SpekeKeyProvider. type MsSmoothEncryptionSettings struct { // If your output group type is HLS, DASH, or Microsoft Smooth, use these settings @@ -6438,10 +6156,7 @@ type MsSmoothEncryptionSettings struct { // Settings related to your Microsoft Smooth Streaming output package. For more // information, see -// https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html. When -// you work directly in your JSON job specification, include this object and any -// required children when you set Type, under OutputGroupSettings, to -// MS_SMOOTH_GROUP_SETTINGS. +// https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html. type MsSmoothGroupSettings struct { // By default, the service creates one .ism Microsoft Smooth Streaming manifest @@ -6455,37 +6170,33 @@ type MsSmoothGroupSettings struct { // Microsoft Smooth output group into a single audio stream. AudioDeduplication MsSmoothAudioDeduplication - // Use Destination (Destination) to specify the S3 output location and the output - // filename base. Destination accepts format identifiers. If you do not specify the - // base filename in the URI, the service will use the filename of the input file. 
- // If your job has multiple inputs, the service uses the filename of the first - // input file. + // Use Destination to specify the S3 output location and the output filename base. + // Destination accepts format identifiers. If you do not specify the base filename + // in the URI, the service will use the filename of the input file. If your job has + // multiple inputs, the service uses the filename of the first input file. Destination *string // Settings associated with the destination. Will vary based on the type of // destination DestinationSettings *DestinationSettings - // If you are using DRM, set DRM System (MsSmoothEncryptionSettings) to specify - // the value SpekeKeyProvider. + // If you are using DRM, set DRM System to specify the value SpekeKeyProvider. Encryption *MsSmoothEncryptionSettings // Specify how you want MediaConvert to determine the fragment length. Choose - // Exact (EXACT) to have the encoder use the exact length that you specify with the - // setting Fragment length (FragmentLength). This might result in extra I-frames. - // Choose Multiple of GOP (GOP_MULTIPLE) to have the encoder round up the segment - // lengths to match the next GOP boundary. + // Exact to have the encoder use the exact length that you specify with the setting + // Fragment length. This might result in extra I-frames. Choose Multiple of GOP to + // have the encoder round up the segment lengths to match the next GOP boundary. FragmentLength int32 // Specify how you want MediaConvert to determine the fragment length. Choose - // Exact (EXACT) to have the encoder use the exact length that you specify with the - // setting Fragment length (FragmentLength). This might result in extra I-frames. - // Choose Multiple of GOP (GOP_MULTIPLE) to have the encoder round up the segment - // lengths to match the next GOP boundary. + // Exact to have the encoder use the exact length that you specify with the setting + // Fragment length. This might result in extra I-frames. Choose Multiple of GOP to + // have the encoder round up the segment lengths to match the next GOP boundary. FragmentLengthControl MsSmoothFragmentLengthControl - // Use Manifest encoding (MsSmoothManifestEncoding) to specify the encoding format - // for the server and client manifest. Valid options are utf8 and utf16. + // Use Manifest encoding to specify the encoding format for the server and client + // manifest. Valid options are utf8 and utf16. ManifestEncoding MsSmoothManifestEncoding noSmithyDocumentSerde @@ -6496,12 +6207,12 @@ type MxfSettings struct { // Optional. When you have AFD signaling set up in your output video stream, use // this setting to choose whether to also include it in the MXF wrapper. Choose - // Don't copy (NO_COPY) to exclude AFD signaling from the MXF wrapper. Choose Copy - // from video stream (COPY_FROM_VIDEO) to copy the AFD values from the video stream - // for this output to the MXF wrapper. Regardless of which option you choose, the - // AFD values remain in the video stream. Related settings: To set up your output - // to include or exclude AFD values, see AfdSignaling, under VideoDescription. On - // the console, find AFD signaling under the output's video encoding settings. + // Don't copy to exclude AFD signaling from the MXF wrapper. Choose Copy from video + // stream to copy the AFD values from the video stream for this output to the MXF + // wrapper. Regardless of which option you choose, the AFD values remain in the + // video stream. 
Related settings: To set up your output to include or exclude AFD + // values, see AfdSignaling, under VideoDescription. On the console, find AFD + // signaling under the output's video encoding settings. AfdSignaling MxfAfdSignaling // Specify the MXF profile, also called shim, for this output. To automatically @@ -6524,20 +6235,19 @@ type MxfSettings struct { type MxfXavcProfileSettings struct { // To create an output that complies with the XAVC file format guidelines for - // interoperability, keep the default value, Drop frames for compliance - // (DROP_FRAMES_FOR_COMPLIANCE). To include all frames from your input in this - // output, keep the default setting, Allow any duration (ALLOW_ANY_DURATION). The - // number of frames that MediaConvert excludes when you set this to Drop frames for - // compliance depends on the output frame rate and duration. + // interoperability, keep the default value, Drop frames for compliance. To include + // all frames from your input in this output, choose Allow any duration. The number + // of frames that MediaConvert excludes when you set this to Drop frames for + // compliance depends on the output frame rate and duration. DurationMode MxfXavcDurationMode // Specify a value for this setting only for outputs that you set up with one of - // these two XAVC profiles: XAVC HD Intra CBG (XAVC_HD_INTRA_CBG) or XAVC 4K Intra - // CBG (XAVC_4K_INTRA_CBG). Specify the amount of space in each frame that the - // service reserves for ancillary data, such as teletext captions. The default - // value for this setting is 1492 bytes per frame. This should be sufficient to - // prevent overflow unless you have multiple pages of teletext captions data. If - // you have a large amount of teletext data, specify a larger number. + // these two XAVC profiles: XAVC HD Intra CBG or XAVC 4K Intra CBG. Specify the + // amount of space in each frame that the service reserves for ancillary data, such + // as teletext captions. The default value for this setting is 1492 bytes per + // frame. This should be sufficient to prevent overflow unless you have multiple + // pages of teletext captions data. If you have a large amount of teletext data, + // specify a larger number. MaxAncDataSize int32 noSmithyDocumentSerde @@ -6550,26 +6260,23 @@ type NexGuardFileMarkerSettings struct { // Use the base64 license string that Nagra provides you. Enter it directly in // your JSON job specification or in the console. Required when you include Nagra - // NexGuard File Marker watermarking (NexGuardWatermarkingSettings) in your job. + // NexGuard File Marker watermarking in your job. License *string // Specify the payload ID that you want associated with this output. Valid values // vary depending on your Nagra NexGuard forensic watermarking workflow. Required - // when you include Nagra NexGuard File Marker watermarking - // (NexGuardWatermarkingSettings) in your job. For PreRelease Content (NGPR/G2), - // specify an integer from 1 through 4,194,303. You must generate a unique ID for - // each asset you watermark, and keep a record of which ID you have assigned to - // each asset. Neither Nagra nor MediaConvert keep track of the relationship - // between output files and your IDs. For OTT Streaming, create two adaptive - // bitrate (ABR) stacks for each asset. Do this by setting up two output groups. - // For one output group, set the value of Payload ID (payload) to 0 in every - // output. For the other output group, set Payload ID (payload) to 1 in every - // output.
+ // when you include Nagra NexGuard File Marker watermarking in your job. For + // PreRelease Content (NGPR/G2), specify an integer from 1 through 4,194,303. You + // must generate a unique ID for each asset you watermark, and keep a record of + // which ID you have assigned to each asset. Neither Nagra nor MediaConvert keeps + // track of the relationship between output files and your IDs. For OTT Streaming, + // create two adaptive bitrate (ABR) stacks for each asset. Do this by setting up + // two output groups. For one output group, set the value of Payload ID to 0 in + // every output. For the other output group, set Payload ID to 1 in every output. Payload int32 // Enter one of the watermarking preset strings that Nagra provides you. Required - // when you include Nagra NexGuard File Marker watermarking - // (NexGuardWatermarkingSettings) in your job. + // when you include Nagra NexGuard File Marker watermarking in your job. Preset *string // Optional. Ignore this setting unless Nagra support directs you to specify a @@ -6581,20 +6288,16 @@ type NexGuardFileMarkerSettings struct { } // Settings for your Nielsen configuration. If you don't do Nielsen measurement -// and analytics, ignore these settings. When you enable Nielsen configuration -// (nielsenConfiguration), MediaConvert enables PCM to ID3 tagging for all outputs -// in the job. To enable Nielsen configuration programmatically, include an -// instance of nielsenConfiguration in your JSON job specification. Even if you -// don't include any children of nielsenConfiguration, you still enable the -// setting. +// and analytics, ignore these settings. When you enable Nielsen configuration, +// MediaConvert enables PCM to ID3 tagging for all outputs in the job. type NielsenConfiguration struct { // Nielsen has discontinued the use of breakout code functionality. If you must // include this property, set the value to zero. BreakoutCode int32 - // Use Distributor ID (DistributorID) to specify the distributor ID that is - // assigned to your organization by Neilsen. + // Use Distributor ID to specify the distributor ID that is assigned to your + // organization by Nielsen. DistributorId *string noSmithyDocumentSerde @@ -6610,17 +6313,16 @@ type NielsenConfiguration struct { type NielsenNonLinearWatermarkSettings struct { // Choose the type of Nielsen watermarks that you want in your outputs. When you - // choose NAES 2 and NW (NAES2_AND_NW), you must provide a value for the setting - // SID (sourceId). When you choose CBET (CBET), you must provide a value for the - // setting CSID (cbetSourceId). When you choose NAES 2, NW, and CBET - // (NAES2_AND_NW_AND_CBET), you must provide values for both of these settings. + // choose NAES 2 and NW, you must provide a value for the setting SID. When you + // choose CBET, you must provide a value for the setting CSID. When you choose NAES + // 2, NW, and CBET, you must provide values for both of these settings. ActiveWatermarkProcess NielsenActiveWatermarkProcessType // Optional. Use this setting when you want the service to include an ADI file in // the Nielsen metadata .zip file. To provide an ADI file, store it in Amazon S3 // and provide a URL to it here. The URL should be in the following format: // S3://bucket/path/ADI-file. For more information about the metadata .zip file, - // see the setting Metadata destination (metadataDestination). + // see the setting Metadata destination. AdiFilename *string // Use the asset ID that you provide to Nielsen to uniquely identify this asset.
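A minimal usage sketch of the Nielsen non-linear watermark settings above, in Go against the generated types package. It assumes the usual aws-sdk-go-v2 constant naming for the enums (types.NielsenActiveWatermarkProcessTypeNaes2AndNwAndCbet, types.NielsenSourceWatermarkStatusTypeClean); the SID, CSID, and bucket values are hypothetical placeholders, not values from this diff.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/mediaconvert/types"
)

func main() {
	// Choosing the combined NAES 2, NW, and CBET process means both the SID
	// (SourceId) and the CSID (CbetSourceId) must be provided, per the doc
	// comments above.
	wm := types.NielsenNonLinearWatermarkSettings{
		ActiveWatermarkProcess: types.NielsenActiveWatermarkProcessTypeNaes2AndNwAndCbet,
		SourceId:               1234,               // hypothetical SID from Nielsen
		CbetSourceId:           aws.String("ABCD"), // hypothetical CSID from Nielsen
		// Nielsen requires clean source content that isn't already watermarked.
		SourceWatermarkStatus: types.NielsenSourceWatermarkStatusTypeClean,
		// MediaConvert delivers the metadata .zip only to this bucket; you are
		// responsible for delivering it to Nielsen.
		MetadataDestination: aws.String("s3://example-bucket/nielsen-metadata/"),
	}
	fmt.Printf("%+v\n", wm)
}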
@@ -6634,7 +6336,7 @@ type NielsenNonLinearWatermarkSettings struct { // Use the CSID that Nielsen provides to you. This CBET source ID should be unique // to your Nielsen account but common to all of your output assets that have CBET // watermarking. Required when you choose a value for the setting Watermark types - // (ActiveWatermarkProcess) that includes CBET. + // that includes CBET. CbetSourceId *string // Optional. If this asset uses an episode ID with Nielsen, provide it here. @@ -6643,10 +6345,10 @@ type NielsenNonLinearWatermarkSettings struct { // Specify the Amazon S3 location where you want MediaConvert to save your Nielsen // non-linear metadata .zip file. This Amazon S3 bucket must be in the same Region // as the one where you do your MediaConvert transcoding. If you want to include an - // ADI file in this .zip file, use the setting ADI file (adiFilename) to specify - // it. MediaConvert delivers the Nielsen metadata .zip files only to your metadata - // destination Amazon S3 bucket. It doesn't deliver the .zip files to Nielsen. You - // are responsible for delivering the metadata .zip files to Nielsen. + // ADI file in this .zip file, use the setting ADI file to specify it. MediaConvert + // delivers the Nielsen metadata .zip files only to your metadata destination + // Amazon S3 bucket. It doesn't deliver the .zip files to Nielsen. You are + // responsible for delivering the metadata .zip files to Nielsen. MetadataDestination *string // Use the SID that Nielsen provides to you. This source ID should be unique to @@ -6657,9 +6359,9 @@ type NielsenNonLinearWatermarkSettings struct { SourceId int32 // Required. Specify whether your source content already contains Nielsen - // non-linear watermarks. When you set this value to Watermarked (WATERMARKED), the - // service fails the job. Nielsen requires that you add non-linear watermarking to - // only clean content that doesn't already have non-linear Nielsen watermarks. + // non-linear watermarks. When you set this value to Watermarked, the service fails + // the job. Nielsen requires that you add non-linear watermarking to only clean + // content that doesn't already have non-linear Nielsen watermarks. SourceWatermarkStatus NielsenSourceWatermarkStatusType // Specify the endpoint for the TIC server that you have deployed and configured @@ -6671,9 +6373,8 @@ type NielsenNonLinearWatermarkSettings struct { TicServerUrl *string // To create assets that have the same TIC values in each audio track, keep the - // default value Share TICs (SAME_TICS_PER_TRACK). To create assets that have - // unique TIC values for each audio track, choose Use unique TICs - // (RESERVE_UNIQUE_TICS_PER_TRACK). + // default value Share TICs. To create assets that have unique TIC values for each + // audio track, choose Use unique TICs. UniqueTicPerAudioTrack NielsenUniqueTicPerAudioTrackType noSmithyDocumentSerde @@ -6686,13 +6387,12 @@ type NielsenNonLinearWatermarkSettings struct { // reducer, you cannot include the Bandwidth reduction filter. type NoiseReducer struct { - // Use Noise reducer filter (NoiseReducerFilter) to select one of the following - // spatial image filtering functions. To use this setting, you must also enable - // Noise reducer (NoiseReducer). * Bilateral preserves edges while reducing noise. - // * Mean (softest), Gaussian, Lanczos, and Sharpen (sharpest) do convolution - // filtering. * Conserve does min/max noise reduction. * Spatial does - // frequency-domain filtering based on JND principles. 
* Temporal optimizes video - // quality for complex motion. + // Use Noise reducer filter to select one of the following spatial image filtering + // functions. To use this setting, you must also enable Noise reducer. * Bilateral + // preserves edges while reducing noise. * Mean (softest), Gaussian, Lanczos, and + // Sharpen (sharpest) do convolution filtering. * Conserve does min/max noise + // reduction. * Spatial does frequency-domain filtering based on JND principles. * + // Temporal optimizes video quality for complex motion. Filter NoiseReducerFilter // Settings for a noise reducer filter @@ -6743,22 +6443,20 @@ type NoiseReducerTemporalFilterSettings struct { // aggressively and creates better VQ for low bitrate outputs. AggressiveMode int32 - // When you set Noise reducer (noiseReducer) to Temporal (TEMPORAL), the bandwidth - // and sharpness of your output is reduced. You can optionally use Post temporal - // sharpening (postTemporalSharpening) to apply sharpening to the edges of your - // output. Note that Post temporal sharpening will also make the bandwidth - // reduction from the Noise reducer smaller. The default behavior, Auto (AUTO), - // allows the transcoder to determine whether to apply sharpening, depending on - // your input type and quality. When you set Post temporal sharpening to Enabled - // (ENABLED), specify how much sharpening is applied using Post temporal sharpening - // strength (postTemporalSharpeningStrength). Set Post temporal sharpening to - // Disabled (DISABLED) to not apply sharpening. + // When you set Noise reducer to Temporal, the bandwidth and sharpness of your + // output are reduced. You can optionally use Post temporal sharpening to apply + // sharpening to the edges of your output. Note that Post temporal sharpening will + // also make the bandwidth reduction from the Noise reducer smaller. The default + // behavior, Auto, allows the transcoder to determine whether to apply sharpening, + // depending on your input type and quality. When you set Post temporal sharpening + // to Enabled, specify how much sharpening is applied using Post temporal + // sharpening strength. Set Post temporal sharpening to Disabled to not apply + // sharpening. PostTemporalSharpening NoiseFilterPostTemporalSharpening - // Use Post temporal sharpening strength (postTemporalSharpeningStrength) to - // define the amount of sharpening the transcoder applies to your output. Set Post - // temporal sharpening strength to Low (LOW), Medium (MEDIUM), or High (HIGH) to - // indicate the amount of sharpening. + // Use Post temporal sharpening strength to define the amount of sharpening the + // transcoder applies to your output. Set Post temporal sharpening strength to Low, + // Medium, or High to indicate the amount of sharpening. PostTemporalSharpeningStrength NoiseFilterPostTemporalSharpeningStrength // The speed of the filter (higher number is faster). Low setting reduces bit rate @@ -6785,9 +6483,9 @@ type OpusSettings struct { // we recommend for quality and bandwidth. Bitrate int32 - // Specify the number of channels in this output audio track. Choosing Mono on the - // console gives you 1 output channel; choosing Stereo gives you 2. In the API, - // valid values are 1 and 2. + // Specify the number of channels in this output audio track. Choosing Mono + // gives you 1 output channel; choosing Stereo gives you 2. In the API, valid + // values are 1 and 2. Channels int32 // Optional. Sample rate in Hz. Valid values are 16000, 24000, and 48000.
The @@ -6802,42 +6500,39 @@ // https://docs.aws.amazon.com/mediaconvert/latest/ug/create-outputs.html. type Output struct { - // (AudioDescriptions) contains groups of audio encoding settings organized by - // audio codec. Include one instance of (AudioDescriptions) per output. - // (AudioDescriptions) can contain multiple groups of encoding settings. + // Contains groups of audio encoding settings organized by audio codec. Include + // one instance per output. Can contain multiple groups of encoding settings. AudioDescriptions []AudioDescription - // (CaptionDescriptions) contains groups of captions settings. For each output - // that has captions, include one instance of (CaptionDescriptions). - // (CaptionDescriptions) can contain multiple groups of captions settings. + // Contains groups of captions settings. For each output that has captions, + // include one instance of CaptionDescriptions. Can contain multiple groups of + // captions settings. CaptionDescriptions []CaptionDescription // Container specific settings. ContainerSettings *ContainerSettings - // Use Extension (Extension) to specify the file extension for outputs in File - // output groups. If you do not specify a value, the service will use default - // extensions by container type as follows * MPEG-2 transport stream, m2ts * - // Quicktime, mov * MXF container, mxf * MPEG-4 container, mp4 * WebM container, - // webm * No Container, the service will use codec extensions (e.g. AAC, H265, - // H265, AC3) + // Use Extension to specify the file extension for outputs in File output groups. + // If you do not specify a value, the service will use default extensions by + // container type as follows: * MPEG-2 transport stream, m2ts * Quicktime, mov * MXF + // container, mxf * MPEG-4 container, mp4 * WebM container, webm * No Container, + // the service will use codec extensions (e.g. AAC, H265, H265, AC3) Extension *string - // Use Name modifier (NameModifier) to have the service add a string to the end of - // each output filename. You specify the base filename as part of your destination - // URI. When you create multiple outputs in the same output group, Name modifier - // (NameModifier) is required. Name modifier also accepts format identifiers. For - // DASH ISO outputs, if you use the format identifiers $Number$ or $Time$ in - // one output, you must use them in the same way in all outputs of the output - // group. + // Use Name modifier to have the service add a string to the end of each output + // filename. You specify the base filename as part of your destination URI. When + // you create multiple outputs in the same output group, Name modifier is required. + // Name modifier also accepts format identifiers. For DASH ISO outputs, if you use + // the format identifiers $Number$ or $Time$ in one output, you must use them + // in the same way in all outputs of the output group. NameModifier *string // Specific settings for this type of output. OutputSettings *OutputSettings - // Use Preset (Preset) to specify a preset for your transcoding settings. Provide - // the system or custom preset name. You can specify either Preset (Preset) or - // Container settings (ContainerSettings), but not both. + // Use Preset to specify a preset for your transcoding settings. Provide the + // system or custom preset name. You can specify either Preset or Container + // settings, but not both. Preset *string // VideoDescription contains a group of video encoding settings.
The specific @@ -6882,8 +6577,8 @@ type OutputGroup struct { // you, based on characteristics of your input video. AutomatedEncodingSettings *AutomatedEncodingSettings - // Use Custom Group Name (CustomName) to specify a name for the output group. This - // value is displayed on the console and can make your job settings JSON more + // Use Custom Group Name to specify a name for the output group. This value is + // displayed on the console and can make your job settings JSON more // human-readable. It does not affect your outputs. Use up to twelve characters // that are either letters, numbers, spaces, or underscores. CustomName *string @@ -6913,39 +6608,24 @@ type OutputGroupDetail struct { type OutputGroupSettings struct { // Settings related to your CMAF output package. For more information, see - // https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html. When - // you work directly in your JSON job specification, include this object and any - // required children when you set Type, under OutputGroupSettings, to - // CMAF_GROUP_SETTINGS. + // https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html. CmafGroupSettings *CmafGroupSettings // Settings related to your DASH output package. For more information, see - // https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html. When - // you work directly in your JSON job specification, include this object and any - // required children when you set Type, under OutputGroupSettings, to - // DASH_ISO_GROUP_SETTINGS. + // https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html. DashIsoGroupSettings *DashIsoGroupSettings // Settings related to your File output group. MediaConvert uses this group of // settings to generate a single standalone file, rather than a streaming package. - // When you work directly in your JSON job specification, include this object and - // any required children when you set Type, under OutputGroupSettings, to - // FILE_GROUP_SETTINGS. FileGroupSettings *FileGroupSettings // Settings related to your HLS output package. For more information, see - // https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html. When - // you work directly in your JSON job specification, include this object and any - // required children when you set Type, under OutputGroupSettings, to - // HLS_GROUP_SETTINGS. + // https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html. HlsGroupSettings *HlsGroupSettings // Settings related to your Microsoft Smooth Streaming output package. For more // information, see - // https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html. When - // you work directly in your JSON job specification, include this object and any - // required children when you set Type, under OutputGroupSettings, to - // MS_SMOOTH_GROUP_SETTINGS. + // https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html. MsSmoothGroupSettings *MsSmoothGroupSettings // Type of output group (File group, Apple HLS, DASH ISO, Microsoft Smooth @@ -7033,9 +6713,8 @@ type Preset struct { // Settings for preset type PresetSettings struct { - // (AudioDescriptions) contains groups of audio encoding settings organized by - // audio codec. Include one instance of (AudioDescriptions) per output. - // (AudioDescriptions) can contain multiple groups of encoding settings. + // Contains groups of audio encoding settings organized by audio codec. Include + // one instance per output. Can contain multiple groups of encoding settings.
AudioDescriptions []AudioDescription // This object holds groups of settings related to captions for one output. For @@ -7053,23 +6732,21 @@ type PresetSettings struct { noSmithyDocumentSerde } -// Required when you set (Codec) under (VideoDescription)>(CodecSettings) to the -// value PRORES. +// Required when you set Codec to the value PRORES. type ProresSettings struct { // This setting applies only to ProRes 4444 and ProRes 4444 XQ outputs that you // create from inputs that use 4:4:4 chroma sampling. Set Preserve 4:4:4 sampling - // (PRESERVE_444_SAMPLING) to allow outputs to also use 4:4:4 chroma sampling. You - // must specify a value for this setting when your output codec profile supports - // 4:4:4 chroma sampling. Related Settings: For Apple ProRes outputs with 4:4:4 - // chroma sampling: Choose Preserve 4:4:4 sampling. Use when your input has 4:4:4 - // chroma sampling and your output codec Profile is Apple ProRes 4444 or 4444 XQ. - // Note that when you choose Preserve 4:4:4 sampling, you cannot include any of the - // following Preprocessors: Dolby Vision, HDR10+, or Noise reducer. + // to allow outputs to also use 4:4:4 chroma sampling. You must specify a value for + // this setting when your output codec profile supports 4:4:4 chroma sampling. + // Related Settings: For Apple ProRes outputs with 4:4:4 chroma sampling: Choose + // Preserve 4:4:4 sampling. Use when your input has 4:4:4 chroma sampling and your + // output codec Profile is Apple ProRes 4444 or 4444 XQ. Note that when you choose + // Preserve 4:4:4 sampling, you cannot include any of the following Preprocessors: + // Dolby Vision, HDR10+, or Noise reducer. ChromaSampling ProresChromaSampling - // Use Profile (ProResCodecProfile) to specify the type of Apple ProRes codec to - // use for this output. + // Use Profile to specify the type of Apple ProRes codec to use for this output. CodecProfile ProresCodecProfile // If you are using the console, use the Framerate setting to specify the frame @@ -7077,12 +6754,7 @@ type ProresSettings struct { // video, choose Follow source. If you want to do frame rate conversion, choose a // frame rate from the dropdown list or choose Custom. The framerates shown in the // dropdown list are decimal approximations of fractions. If you choose Custom, - // specify your frame rate as a fraction. If you are creating your transcoding job - // specification as a JSON file without the console, use FramerateControl to - // specify which value the service uses for the frame rate for this output. Choose - // INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the - // input. Choose SPECIFIED if you want the service to use the frame rate you - // specify in the settings FramerateNumerator and FramerateDenominator. + // specify your frame rate as a fraction. FramerateControl ProresFramerateControl // Choose the method that you want MediaConvert to use when increasing or @@ -7115,57 +6787,50 @@ type ProresSettings struct { FramerateNumerator int32 // Choose the scan line type for the output. Keep the default value, Progressive - // (PROGRESSIVE) to create a progressive output, regardless of the scan type of - // your input. Use Top field first (TOP_FIELD) or Bottom field first (BOTTOM_FIELD) - // to create an output that's interlaced with the same field polarity throughout. - // Use Follow, default top (FOLLOW_TOP_FIELD) or Follow, default bottom - // (FOLLOW_BOTTOM_FIELD) to produce outputs with the same field polarity as the - // source. 
For jobs that have multiple inputs, the output field polarity might - // change over the course of the output. Follow behavior depends on the input scan - // type. If the source is interlaced, the output will be interlaced with the same - // polarity as the source. If the source is progressive, the output will be - // interlaced with top field bottom field first, depending on which of the Follow - // options you choose. + // to create a progressive output, regardless of the scan type of your input. Use + // Top field first or Bottom field first to create an output that's interlaced with + // the same field polarity throughout. Use Follow, default top or Follow, default + // bottom to produce outputs with the same field polarity as the source. For jobs + // that have multiple inputs, the output field polarity might change over the + // course of the output. Follow behavior depends on the input scan type. If the + // source is interlaced, the output will be interlaced with the same polarity as + // the source. If the source is progressive, the output will be interlaced with top + // field first or bottom field first, depending on which of the Follow options you + // choose. InterlaceMode ProresInterlaceMode // Optional. Specify how the service determines the pixel aspect ratio (PAR) for - // this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE), uses - // the PAR from your input video for your output. To specify a different PAR in the - // console, choose any value other than Follow source. To specify a different PAR - // by editing the JSON job specification, choose SPECIFIED. When you choose - // SPECIFIED for this setting, you must also specify values for the parNumerator - // and parDenominator settings. + // this output. The default behavior, Follow source, uses the PAR from your input + // video for your output. To specify a different PAR, choose any value other than + // Follow source. When you choose SPECIFIED for this setting, you must also specify + // values for the parNumerator and parDenominator settings. ParControl ProresParControl - // Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the - // console, this corresponds to any value other than Follow source. When you - // specify an output pixel aspect ratio (PAR) that is different from your input - // video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC - // widescreen, you would specify the ratio 40:33. In this example, the value for - // parDenominator is 33. + // Required when you set Pixel aspect ratio to SPECIFIED. On the console, this + // corresponds to any value other than Follow source. When you specify an output + // pixel aspect ratio (PAR) that is different from your input video PAR, provide + // your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would + // specify the ratio 40:33. In this example, the value for parDenominator is 33. ParDenominator int32 - // Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the - // console, this corresponds to any value other than Follow source. When you - // specify an output pixel aspect ratio (PAR) that is different from your input - // video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC - // widescreen, you would specify the ratio 40:33. In this example, the value for - // parNumerator is 40. + // Required when you set Pixel aspect ratio to SPECIFIED. On the console, this + // corresponds to any value other than Follow source.
When you specify an output + // pixel aspect ratio (PAR) that is different from your input video PAR, provide + // your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would + // specify the ratio 40:33. In this example, the value for parNumerator is 40. ParNumerator int32 // Use this setting for interlaced outputs, when your output frame rate is half of - // your input frame rate. In this situation, choose Optimized interlacing - // (INTERLACED_OPTIMIZE) to create a better quality interlaced output. In this - // case, each progressive frame from the input corresponds to an interlaced field - // in the output. Keep the default value, Basic interlacing (INTERLACED), for all - // other output frame rates. With basic interlacing, MediaConvert performs any - // frame rate conversion first and then interlaces the frames. When you choose - // Optimized interlacing and you set your output frame rate to a value that isn't - // suitable for optimized interlacing, MediaConvert automatically falls back to - // basic interlacing. Required settings: To use optimized interlacing, you must set - // Telecine (telecine) to None (NONE) or Soft (SOFT). You can't use optimized - // interlacing for hard telecine outputs. You must also set Interlace mode - // (interlaceMode) to a value other than Progressive (PROGRESSIVE). + // your input frame rate. In this situation, choose Optimized interlacing to create + // a better quality interlaced output. In this case, each progressive frame from + // the input corresponds to an interlaced field in the output. Keep the default + // value, Basic interlacing, for all other output frame rates. With basic + // interlacing, MediaConvert performs any frame rate conversion first and then + // interlaces the frames. When you choose Optimized interlacing and you set your + // output frame rate to a value that isn't suitable for optimized interlacing, + // MediaConvert automatically falls back to basic interlacing. Required settings: + // To use optimized interlacing, you must set Telecine to None or Soft. You can't + // use optimized interlacing for hard telecine outputs. You must also set Interlace + // mode to a value other than Progressive. ScanTypeConversionMode ProresScanTypeConversionMode // Ignore this setting unless your input frame rate is 23.976 or 24 frames per @@ -7173,15 +6838,14 @@ type ProresSettings struct { // PAL, MediaConvert relabels the video frames to 25 fps and resamples your audio // to keep it synchronized with the video. Note that enabling this setting will // slightly reduce the duration of your video. Required settings: You must also set - // Framerate to 25. In your JSON job specification, set (framerateControl) to - // (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to 1. + // Framerate to 25. SlowPal ProresSlowPal // When you do frame rate conversion from 23.976 frames per second (fps) to 29.97 // fps, and your output scan type is interlaced, you can optionally enable hard - // telecine (HARD) to create a smoother picture. When you keep the default value, - // None (NONE), MediaConvert does a standard frame rate conversion to 29.97 without - // doing anything with the field polarity to create a smoother picture. + // telecine to create a smoother picture. When you keep the default value, None, + // MediaConvert does a standard frame rate conversion to 29.97 without doing + // anything with the field polarity to create a smoother picture. 
Telecine ProresTelecine noSmithyDocumentSerde @@ -7278,22 +6942,22 @@ type Rectangle struct { noSmithyDocumentSerde } -// Use Manual audio remixing (RemixSettings) to adjust audio levels for each audio -// channel in each output of your job. With audio remixing, you can output more or -// fewer audio channels than your input audio source provides. +// Use Manual audio remixing to adjust audio levels for each audio channel in each +// output of your job. With audio remixing, you can output more or fewer audio +// channels than your input audio source provides. type RemixSettings struct { - // Channel mapping (ChannelMapping) contains the group of fields that hold the - // remixing value for each channel, in dB. Specify remix values to indicate how - // much of the content from your input audio channel you want in your output audio - // channels. Each instance of the InputChannels or InputChannelsFineTune array - // specifies these values for one output channel. Use one instance of this array - // for each output channel. In the console, each array corresponds to a column in - // the graphical depiction of the mapping matrix. The rows of the graphical matrix - // correspond to input channels. Valid values are within the range from -60 (mute) - // through 6. A setting of 0 passes the input channel unchanged to the output - // channel (no attenuation or amplification). Use InputChannels or - // InputChannelsFineTune to specify your remix values. Don't use both. + // Channel mapping contains the group of fields that hold the remixing value for + // each channel, in dB. Specify remix values to indicate how much of the content + // from your input audio channel you want in your output audio channels. Each + // instance of the InputChannels or InputChannelsFineTune array specifies these + // values for one output channel. Use one instance of this array for each output + // channel. In the console, each array corresponds to a column in the graphical + // depiction of the mapping matrix. The rows of the graphical matrix correspond to + // input channels. Valid values are within the range from -60 (mute) through 6. A + // setting of 0 passes the input channel unchanged to the output channel (no + // attenuation or amplification). Use InputChannels or InputChannelsFineTune to + // specify your remix values. Don't use both. ChannelMapping *ChannelMapping // Specify the number of audio channels from your input that you want to use in @@ -7426,32 +7090,29 @@ type S3EncryptionSettings struct { // content. AWS also encrypts the data keys themselves, using a customer master key // (CMK), and then stores the encrypted data keys alongside your encrypted content. // Use this setting to specify which AWS service manages the CMK. For simplest set - // up, choose Amazon S3 (SERVER_SIDE_ENCRYPTION_S3). If you want your master key to - // be managed by AWS Key Management Service (KMS), choose AWS KMS - // (SERVER_SIDE_ENCRYPTION_KMS). By default, when you choose AWS KMS, KMS uses the - // AWS managed customer master key (CMK) associated with Amazon S3 to encrypt your - // data keys. You can optionally choose to specify a different, customer managed - // CMK. Do so by specifying the Amazon Resource Name (ARN) of the key for the - // setting KMS ARN (kmsKeyArn). + // up, choose Amazon S3. If you want your master key to be managed by AWS Key + // Management Service (KMS), choose AWS KMS. 
By default, when you choose AWS KMS, + // KMS uses the AWS managed customer master key (CMK) associated with Amazon S3 to + // encrypt your data keys. You can optionally choose to specify a different, + // customer managed CMK. Do so by specifying the Amazon Resource Name (ARN) of the + // key for the setting KMS ARN. EncryptionType S3ServerSideEncryptionType // Optionally, specify the encryption context that you want to use alongside your // KMS key. AWS KMS uses this encryption context as additional authenticated data // (AAD) to support authenticated encryption. This value must be a base64-encoded // UTF-8 string holding JSON which represents a string-string map. To use this - // setting, you must also set Server-side encryption (S3ServerSideEncryptionType) - // to AWS KMS (SERVER_SIDE_ENCRYPTION_KMS). For more information about encryption - // context, see: + // setting, you must also set Server-side encryption to AWS KMS. For more + // information about encryption context, see: // https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context. KmsEncryptionContext *string // Optionally, specify the customer master key (CMK) that you want to use to // encrypt the data key that AWS uses to encrypt your output content. Enter the // Amazon Resource Name (ARN) of the CMK. To use this setting, you must also set - // Server-side encryption (S3ServerSideEncryptionType) to AWS KMS - // (SERVER_SIDE_ENCRYPTION_KMS). If you set Server-side encryption to AWS KMS but - // don't specify a CMK here, AWS uses the AWS managed CMK associated with Amazon - // S3. + // Server-side encryption to AWS KMS. If you set Server-side encryption to AWS KMS + // but don't specify a CMK here, AWS uses the AWS managed CMK associated with + // Amazon S3. KmsKeyArn *string noSmithyDocumentSerde @@ -7462,16 +7123,13 @@ type S3EncryptionSettings struct { // the same output group, but different output from your video. For more // information, see // https://docs.aws.amazon.com/mediaconvert/latest/ug/scc-srt-output-captions.html. -// When you work directly in your JSON job specification, include this object and -// any required children when you set destinationType to SCC. type SccDestinationSettings struct { - // Set Framerate (SccDestinationFramerate) to make sure that the captions and the - // video are synchronized in the output. Specify a frame rate that matches the - // frame rate of the associated video. If the video frame rate is 29.97, choose - // 29.97 dropframe (FRAMERATE_29_97_DROPFRAME) only if the video has - // video_insertion=true and drop_frame_timecode=true; otherwise, choose 29.97 - // non-dropframe (FRAMERATE_29_97_NON_DROPFRAME). + // Set Framerate to make sure that the captions and the video are synchronized in + // the output. Specify a frame rate that matches the frame rate of the associated + // video. If the video frame rate is 29.97, choose 29.97 dropframe only if the + // video has video_insertion=true and drop_frame_timecode=true; otherwise, choose + // 29.97 non-dropframe. Framerate SccDestinationFramerate noSmithyDocumentSerde @@ -7538,16 +7196,14 @@ type SpekeKeyProviderCmaf struct { // Settings related to SRT captions. SRT is a sidecar format that holds captions // in a file that is separate from the video container. Set up sidecar captions in -// the same output group, but different output from your video. When you work -// directly in your JSON job specification, include this object and any required -// children when you set destinationType to SRT. 
+// the same output group, but different output from your video. type SrtDestinationSettings struct { - // Set Style passthrough (StylePassthrough) to ENABLED to use the available style, - // color, and position information from your input captions. MediaConvert uses - // default settings for any missing style and position information in your input - // captions. Set Style passthrough to DISABLED, or leave blank, to ignore the style - // and position information from your input captions and use simplified output + // Set Style passthrough to ENABLED to use the available style, color, and + // position information from your input captions. MediaConvert uses default + // settings for any missing style and position information in your input captions. + // Set Style passthrough to DISABLED, or leave blank, to ignore the style and + // position information from your input captions and use simplified output // captions. StylePassthrough SrtStylePassthrough @@ -7567,7 +7223,7 @@ type StaticKeyProvider struct { KeyFormatVersions *string // Relates to DRM implementation. Use a 32-character hexadecimal string to specify - // Key Value (StaticKeyValue). + // Key Value. StaticKeyValue *string // Relates to DRM implementation. The location of the license server used for @@ -7580,8 +7236,6 @@ type StaticKeyProvider struct { // Settings related to teletext captions. Set up teletext captions in the same // output as your video. For more information, see // https://docs.aws.amazon.com/mediaconvert/latest/ug/teletext-output-captions.html. -// When you work directly in your JSON job specification, include this object and -// any required children when you set destinationType to TELETEXT. type TeletextDestinationSettings struct { // Set pageNumber to the Teletext page number for the destination captions for @@ -7591,10 +7245,10 @@ type TeletextDestinationSettings struct { PageNumber *string // Specify the page types for this Teletext page. If you don't specify a value - // here, the service sets the page type to the default value Subtitle - // (PAGE_TYPE_SUBTITLE). If you pass through the entire set of Teletext data, don't - // use this field. When you pass through a set of Teletext pages, your output has - // the same page types as your input. + // here, the service sets the page type to the default value Subtitle. If you pass + // through the entire set of Teletext data, don't use this field. When you pass + // through a set of Teletext pages, your output has the same page types as your + // input. PageTypes []TeletextPageType noSmithyDocumentSerde @@ -7603,9 +7257,9 @@ type TeletextDestinationSettings struct { // Settings specific to Teletext caption sources, including Page number. type TeletextSourceSettings struct { - // Use Page Number (PageNumber) to specify the three-digit hexadecimal page number - // that will be used for Teletext captions. Do not use this setting if you are - // passing through teletext from the input source to output. + // Use Page Number to specify the three-digit hexadecimal page number that will be + // used for Teletext captions. Do not use this setting if you are passing through + // teletext from the input source to output. PageNumber *string noSmithyDocumentSerde @@ -7614,19 +7268,19 @@ type TeletextSourceSettings struct { // Settings for burning the output timecode and specified prefix into the output. type TimecodeBurnin struct { - // Use Font Size (FontSize) to set the font size of any burned-in timecode. Valid - // values are 10, 16, 32, 48.
+ // Use Font size to set the font size of any burned-in timecode. Valid values are + // 10, 16, 32, 48. FontSize int32 - // Use Position (Position) under under Timecode burn-in (TimecodeBurnIn) to - // specify the location the burned-in timecode on output video. + // Use Position under Timecode burn-in to specify the location of the burned-in + // timecode on the output video. Position TimecodeBurninPosition - // Use Prefix (Prefix) to place ASCII characters before any burned-in timecode. - // For example, a prefix of "EZ-" will result in the timecode "EZ-00:00:00:00". - // Provide either the characters themselves or the ASCII code equivalents. The - // supported range of characters is 0x20 through 0x7e. This includes letters, - // numbers, and all special characters represented on a standard English keyboard. + // Use Prefix to place ASCII characters before any burned-in timecode. For + // example, a prefix of "EZ-" will result in the timecode "EZ-00:00:00:00". Provide + // either the characters themselves or the ASCII code equivalents. The supported + // range of characters is 0x20 through 0x7e. This includes letters, numbers, and + // all special characters represented on a standard English keyboard. Prefix *string noSmithyDocumentSerde @@ -7637,51 +7291,47 @@ type TimecodeBurnin struct { type TimecodeConfig struct { // If you use an editing platform that relies on an anchor timecode, use Anchor - // Timecode (Anchor) to specify a timecode that will match the input video frame to - // the output video frame. Use 24-hour format with frame number, (HH:MM:SS:FF) or + // Timecode to specify a timecode that will match the input video frame to the + // output video frame. Use 24-hour format with frame number, (HH:MM:SS:FF) or // (HH:MM:SS;FF). This setting ignores frame rate conversion. System behavior for - // Anchor Timecode varies depending on your setting for Source (TimecodeSource). * - // If Source (TimecodeSource) is set to Specified Start (SPECIFIEDSTART), the first - // input frame is the specified value in Start Timecode (Start). Anchor Timecode - // (Anchor) and Start Timecode (Start) are used calculate output timecode. * If - // Source (TimecodeSource) is set to Start at 0 (ZEROBASED) the first frame is - // 00:00:00:00. * If Source (TimecodeSource) is set to Embedded (EMBEDDED), the - // first frame is the timecode value on the first input frame of the input. + // Anchor Timecode varies depending on your setting for Source. * If Source is set + // to Specified Start, the first input frame is the specified value in Start + // Timecode. Anchor Timecode and Start Timecode are used to calculate output + // timecode. * If Source is set to Start at 0, the first frame is 00:00:00:00. * If + // Source is set to Embedded, the first frame is the timecode value on the first + // input frame of the input. Anchor *string - // Use Source (TimecodeSource) to set how timecodes are handled within this job. - // To make sure that your video, audio, captions, and markers are synchronized and - // that time-based features, such as image inserter, work correctly, choose the - // Timecode source option that matches your assets. All timecodes are in a 24-hour - // format with frame number (HH:MM:SS:FF). * Embedded (EMBEDDED) - Use the timecode - // that is in the input video. If no embedded timecode is in the source, the - // service will use Start at 0 (ZEROBASED) instead. * Start at 0 (ZEROBASED) - Set - // the timecode of the initial frame to 00:00:00:00.
* Specified Start - // (SPECIFIEDSTART) - Set the timecode of the initial frame to a value other than - // zero. You use Start timecode (Start) to provide this value. + // Use Source to set how timecodes are handled within this job. To make sure that + // your video, audio, captions, and markers are synchronized and that time-based + // features, such as image inserter, work correctly, choose the Timecode source + // option that matches your assets. All timecodes are in a 24-hour format with + // frame number (HH:MM:SS:FF). * Embedded - Use the timecode that is in the input + // video. If no embedded timecode is in the source, the service will use Start at 0 + // instead. * Start at 0 - Set the timecode of the initial frame to 00:00:00:00. * + // Specified Start - Set the timecode of the initial frame to a value other than + // zero. You use Start timecode to provide this value. Source TimecodeSource - // Only use when you set Source (TimecodeSource) to Specified start - // (SPECIFIEDSTART). Use Start timecode (Start) to specify the timecode for the - // initial frame. Use 24-hour format with frame number, (HH:MM:SS:FF) or - // (HH:MM:SS;FF). + // Only use when you set Source to Specified start. Use Start timecode to specify + // the timecode for the initial frame. Use 24-hour format with frame number, + // (HH:MM:SS:FF) or (HH:MM:SS;FF). Start *string // Only applies to outputs that support program-date-time stamp. Use Timestamp - // offset (TimestampOffset) to overwrite the timecode date without affecting the - // time and frame number. Provide the new date as a string in the format - // "yyyy-mm-dd". To use Time stamp offset, you must also enable Insert - // program-date-time (InsertProgramDateTime) in the output settings. For example, - // if the date part of your timecodes is 2002-1-25 and you want to change it to one - // year later, set Timestamp offset (TimestampOffset) to 2003-1-25. + // offset to overwrite the timecode date without affecting the time and frame + // number. Provide the new date as a string in the format "yyyy-mm-dd". To use + // Timestamp offset, you must also enable Insert program-date-time in the output + // settings. For example, if the date part of your timecodes is 2002-1-25 and you + // want to change it to one year later, set Timestamp offset to 2003-1-25. TimestampOffset *string noSmithyDocumentSerde } -// Insert user-defined custom ID3 metadata (id3) at timecodes (timecode) that you -// specify. In each output that you want to include this metadata, you must set ID3 -// metadata (timedMetadata) to Passthrough (PASSTHROUGH). +// Insert user-defined custom ID3 metadata at timecodes that you specify. In each +// output that you want to include this metadata, you must set ID3 metadata to +// Passthrough. type TimedMetadataInsertion struct { // Id3Insertions contains the array of Id3Insertion instances. @@ -7728,8 +7378,6 @@ type TrackSourceSettings struct { // the same output group, but different output from your video. For more // information, see // https://docs.aws.amazon.com/mediaconvert/latest/ug/ttml-and-webvtt-output-captions.html. -// When you work directly in your JSON job specification, include this object and -// any required children when you set destinationType to TTML. 
type TtmlDestinationSettings struct { // Pass through style and position information from a TTML-like input source @@ -7739,8 +7387,7 @@ type TtmlDestinationSettings struct { noSmithyDocumentSerde } -// Required when you set (Codec) under (VideoDescription)>(CodecSettings) to the -// value VC3 +// Required when you set Codec to the value VC3 type Vc3Settings struct { // If you are using the console, use the Framerate setting to specify the frame @@ -7748,12 +7395,7 @@ type Vc3Settings struct { // video, choose Follow source. If you want to do frame rate conversion, choose a // frame rate from the dropdown list or choose Custom. The framerates shown in the // dropdown list are decimal approximations of fractions. If you choose Custom, - // specify your frame rate as a fraction. If you are creating your transcoding job - // specification as a JSON file without the console, use FramerateControl to - // specify which value the service uses for the frame rate for this output. Choose - // INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the - // input. Choose SPECIFIED if you want the service to use the frame rate you - // specify in the settings FramerateNumerator and FramerateDenominator. + // specify your frame rate as a fraction. FramerateControl Vc3FramerateControl // Choose the method that you want MediaConvert to use when increasing or @@ -7790,56 +7432,54 @@ type Vc3Settings struct { InterlaceMode Vc3InterlaceMode // Use this setting for interlaced outputs, when your output frame rate is half of - // your input frame rate. In this situation, choose Optimized interlacing - // (INTERLACED_OPTIMIZE) to create a better quality interlaced output. In this - // case, each progressive frame from the input corresponds to an interlaced field - // in the output. Keep the default value, Basic interlacing (INTERLACED), for all - // other output frame rates. With basic interlacing, MediaConvert performs any - // frame rate conversion first and then interlaces the frames. When you choose - // Optimized interlacing and you set your output frame rate to a value that isn't - // suitable for optimized interlacing, MediaConvert automatically falls back to - // basic interlacing. Required settings: To use optimized interlacing, you must set - // Telecine (telecine) to None (NONE) or Soft (SOFT). You can't use optimized - // interlacing for hard telecine outputs. You must also set Interlace mode - // (interlaceMode) to a value other than Progressive (PROGRESSIVE). + // your input frame rate. In this situation, choose Optimized interlacing to create + // a better quality interlaced output. In this case, each progressive frame from + // the input corresponds to an interlaced field in the output. Keep the default + // value, Basic interlacing, for all other output frame rates. With basic + // interlacing, MediaConvert performs any frame rate conversion first and then + // interlaces the frames. When you choose Optimized interlacing and you set your + // output frame rate to a value that isn't suitable for optimized interlacing, + // MediaConvert automatically falls back to basic interlacing. Required settings: + // To use optimized interlacing, you must set Telecine to None or Soft. You can't + // use optimized interlacing for hard telecine outputs. You must also set Interlace + // mode to a value other than Progressive. ScanTypeConversionMode Vc3ScanTypeConversionMode // Ignore this setting unless your input frame rate is 23.976 or 24 frames per // second (fps). 
Enable slow PAL to create a 25 fps output by relabeling the video // frames and resampling your audio. Note that enabling this setting will slightly // reduce the duration of your video. Related settings: You must also set Framerate - // to 25. In your JSON job specification, set (framerateControl) to (SPECIFIED), - // (framerateNumerator) to 25 and (framerateDenominator) to 1. + // to 25. SlowPal Vc3SlowPal // When you do frame rate conversion from 23.976 frames per second (fps) to 29.97 // fps, and your output scan type is interlaced, you can optionally enable hard - // telecine (HARD) to create a smoother picture. When you keep the default value, - // None (NONE), MediaConvert does a standard frame rate conversion to 29.97 without - // doing anything with the field polarity to create a smoother picture. + // telecine to create a smoother picture. When you keep the default value, None, + // MediaConvert does a standard frame rate conversion to 29.97 without doing + // anything with the field polarity to create a smoother picture. Telecine Vc3Telecine // Specify the VC3 class to choose the quality characteristics for this output. // VC3 class, together with the settings Framerate (framerateNumerator and // framerateDenominator) and Resolution (height and width), determines your output // bitrate. For example, say that your video resolution is 1920x1080 and your - // framerate is 29.97. Then Class 145 (CLASS_145) gives you an output with a - // bitrate of approximately 145 Mbps and Class 220 (CLASS_220) gives you and output - // with a bitrate of approximately 220 Mbps. VC3 class also specifies the color bit - // depth of your output. + // framerate is 29.97. Then Class 145 gives you an output with a bitrate of + // approximately 145 Mbps and Class 220 gives you an output with a bitrate of + // approximately 220 Mbps. VC3 class also specifies the color bit depth of your + // output. Vc3Class Vc3Class noSmithyDocumentSerde } -// Video codec settings, (CodecSettings) under (VideoDescription), contains the -// group of settings related to video encoding. The settings in this group vary -// depending on the value that you choose for Video codec (Codec). For each codec -// enum that you choose, define the corresponding settings object. The following -// lists the codec enum, settings object pairs. * AV1, Av1Settings * AVC_INTRA, -// AvcIntraSettings * FRAME_CAPTURE, FrameCaptureSettings * H_264, H264Settings * -// H_265, H265Settings * MPEG2, Mpeg2Settings * PRORES, ProresSettings * VC3, -// Vc3Settings * VP8, Vp8Settings * VP9, Vp9Settings * XAVC, XavcSettings +// Video codec settings contains the group of settings related to video encoding. +// The settings in this group vary depending on the value that you choose for Video +// codec. For each codec enum that you choose, define the corresponding settings +// object. The following lists the codec enum, settings object pairs. * AV1, +// Av1Settings * AVC_INTRA, AvcIntraSettings * FRAME_CAPTURE, FrameCaptureSettings +// * H_264, H264Settings * H_265, H265Settings * MPEG2, Mpeg2Settings * PRORES, +// ProresSettings * VC3, Vc3Settings * VP8, Vp8Settings * VP9, Vp9Settings * XAVC, +// XavcSettings type VideoCodecSettings struct { // Required when you set Codec, under VideoDescription>CodecSettings to the value @@ -7861,56 +7501,46 @@ type VideoCodecSettings struct { // container must be MXF or QuickTime MOV. Codec VideoCodec - // Required when you set (Codec) under (VideoDescription)>(CodecSettings) to the - // value FRAME_CAPTURE.
+ // Required when you set Codec to the value FRAME_CAPTURE. FrameCaptureSettings *FrameCaptureSettings - // Required when you set (Codec) under (VideoDescription)>(CodecSettings) to the - // value H_264. + // Required when you set Codec to the value H_264. H264Settings *H264Settings // Settings for H265 codec H265Settings *H265Settings - // Required when you set (Codec) under (VideoDescription)>(CodecSettings) to the - // value MPEG2. + // Required when you set Codec to the value MPEG2. Mpeg2Settings *Mpeg2Settings - // Required when you set (Codec) under (VideoDescription)>(CodecSettings) to the - // value PRORES. + // Required when you set Codec to the value PRORES. ProresSettings *ProresSettings - // Required when you set (Codec) under (VideoDescription)>(CodecSettings) to the - // value VC3 + // Required when you set Codec to the value VC3 Vc3Settings *Vc3Settings - // Required when you set (Codec) under (VideoDescription)>(CodecSettings) to the - // value VP8. + // Required when you set Codec to the value VP8. Vp8Settings *Vp8Settings - // Required when you set (Codec) under (VideoDescription)>(CodecSettings) to the - // value VP9. + // Required when you set Codec to the value VP9. Vp9Settings *Vp9Settings - // Required when you set (Codec) under (VideoDescription)>(CodecSettings) to the - // value XAVC. + // Required when you set Codec to the value XAVC. XavcSettings *XavcSettings noSmithyDocumentSerde } // Settings related to video encoding of your output. The specific video settings -// depend on the video codec that you choose. When you work directly in your JSON -// job specification, include one instance of Video description (VideoDescription) -// per output. +// depend on the video codec that you choose. type VideoDescription struct { // This setting only applies to H.264, H.265, and MPEG2 outputs. Use Insert AFD - // signaling (AfdSignaling) to specify whether the service includes AFD values in - // the output video data and what those values are. * Choose None to remove all AFD - // values from this output. * Choose Fixed to ignore input AFD values and instead - // encode the value specified in the job. * Choose Auto to calculate output AFD - // values based on the input AFD scaler data. + // signaling to specify whether the service includes AFD values in the output video + // data and what those values are. * Choose None to remove all AFD values from this + // output. * Choose Fixed to ignore input AFD values and instead encode the value + // specified in the job. * Choose Auto to calculate output AFD values based on the + // input AFD scaler data. AfdSignaling AfdSignaling // The anti-alias filter is automatically applied to all outputs. The service no @@ -7918,34 +7548,33 @@ type VideoDescription struct { // job, the service will ignore the setting. AntiAlias AntiAlias - // Video codec settings, (CodecSettings) under (VideoDescription), contains the - // group of settings related to video encoding. The settings in this group vary - // depending on the value that you choose for Video codec (Codec). For each codec - // enum that you choose, define the corresponding settings object. The following - // lists the codec enum, settings object pairs. 
* AV1, Av1Settings * AVC_INTRA, - // AvcIntraSettings * FRAME_CAPTURE, FrameCaptureSettings * H_264, H264Settings * - // H_265, H265Settings * MPEG2, Mpeg2Settings * PRORES, ProresSettings * VC3, - // Vc3Settings * VP8, Vp8Settings * VP9, Vp9Settings * XAVC, XavcSettings + // Video codec settings contains the group of settings related to video encoding. + // The settings in this group vary depending on the value that you choose for Video + // codec. For each codec enum that you choose, define the corresponding settings + // object. The following lists the codec enum, settings object pairs. * AV1, + // Av1Settings * AVC_INTRA, AvcIntraSettings * FRAME_CAPTURE, FrameCaptureSettings + // * H_264, H264Settings * H_265, H265Settings * MPEG2, Mpeg2Settings * PRORES, + // ProresSettings * VC3, Vc3Settings * VP8, Vp8Settings * VP9, Vp9Settings * XAVC, + // XavcSettings CodecSettings *VideoCodecSettings - // Choose Insert (INSERT) for this setting to include color metadata in this - // output. Choose Ignore (IGNORE) to exclude color metadata from this output. If - // you don't specify a value, the service sets this to Insert by default. + // Choose Insert for this setting to include color metadata in this output. Choose + // Ignore to exclude color metadata from this output. If you don't specify a value, + // the service sets this to Insert by default. ColorMetadata ColorMetadata - // Use Cropping selection (crop) to specify the video area that the service will - // include in the output video frame. + // Use Cropping selection to specify the video area that the service will include + // in the output video frame. Crop *Rectangle // Applies only to 29.97 fps outputs. When this feature is enabled, the service // will use drop-frame timecode on outputs. If it is not possible to use drop-frame // timecode, the system will fall back to non-drop-frame. This setting is enabled - // by default when Timecode insertion (TimecodeInsertion) is enabled. + // by default when Timecode insertion is enabled. DropFrameTimecode DropFrameTimecode - // Applies only if you set AFD Signaling(AfdSignaling) to Fixed (FIXED). Use Fixed - // (FixedAfd) to specify a four-bit AFD value which the service will write on all - // frames of this video output. + // Applies only if you set AFD Signaling to Fixed. Use Fixed to specify a four-bit + // AFD value which the service will write on all frames of this video output. FixedAfd int32 // Use Height to define the video resolution height, in pixels, for this output. @@ -7955,52 +7584,47 @@ type VideoDescription struct { // your output will be 1280x720. Height int32 - // Use Selection placement (position) to define the video area in your output - // frame. The area outside of the rectangle that you specify here is black. + // Use Selection placement to define the video area in your output frame. The area + // outside of the rectangle that you specify here is black. Position *Rectangle - // Use Respond to AFD (RespondToAfd) to specify how the service changes the video - // itself in response to AFD values in the input. * Choose Respond to clip the - // input video frame according to the AFD value, input display aspect ratio, and - // output display aspect ratio. * Choose Passthrough to include the input AFD - // values. Do not choose this when AfdSignaling is set to (NONE). A preferred - // implementation of this workflow is to set RespondToAfd to (NONE) and set - // AfdSignaling to (AUTO). * Choose None to remove all input AFD values from this - // output. 
+ // Use Respond to AFD to specify how the service changes the video itself in + // response to AFD values in the input. * Choose Respond to clip the input video + // frame according to the AFD value, input display aspect ratio, and output display + // aspect ratio. * Choose Passthrough to include the input AFD values. Do not + // choose this when AfdSignaling is set to NONE. A preferred implementation of this + // workflow is to set RespondToAfd to None and set AfdSignaling to AUTO. * Choose None + // to remove all input AFD values from this output. RespondToAfd RespondToAfd // Specify how the service handles outputs that have a different aspect ratio from - // the input aspect ratio. Choose Stretch to output (STRETCH_TO_OUTPUT) to have the - // service stretch your video image to fit. Keep the setting Default (DEFAULT) to - // have the service letterbox your video instead. This setting overrides any value - // that you specify for the setting Selection placement (position) in this output. + // the input aspect ratio. Choose Stretch to output to have the service stretch + // your video image to fit. Keep the setting Default to have the service letterbox + // your video instead. This setting overrides any value that you specify for the + // setting Selection placement in this output. ScalingBehavior ScalingBehavior - // Use Sharpness (Sharpness) setting to specify the strength of anti-aliasing. - // This setting changes the width of the anti-alias filter kernel used for scaling. - // Sharpness only applies if your output resolution is different from your input - // resolution. 0 is the softest setting, 100 the sharpest, and 50 recommended for - // most content. + // Use the Sharpness setting to specify the strength of anti-aliasing. This setting + // changes the width of the anti-alias filter kernel used for scaling. Sharpness + // only applies if your output resolution is different from your input resolution. + // 0 is the softest setting, 100 the sharpest, and 50 recommended for most content. Sharpness int32 // Applies only to H.264, H.265, MPEG2, and ProRes outputs. Only enable Timecode // insertion when the input frame rate is identical to the output frame rate. To - // include timecodes in this output, set Timecode insertion - // (VideoTimecodeInsertion) to PIC_TIMING_SEI. To leave them out, set it to - // DISABLED. Default is DISABLED. When the service inserts timecodes in an output, - // by default, it uses any embedded timecodes from the input. If none are present, - // the service will set the timecode for the first output frame to zero. To change - // this default behavior, adjust the settings under Timecode configuration - // (TimecodeConfig). In the console, these settings are located under Job > Job - // settings > Timecode configuration. Note - Timecode source under input settings - // (InputTimecodeSource) does not affect the timecodes that are inserted in the - // output. Source under Job settings > Timecode configuration (TimecodeSource) - // does. + // include timecodes in this output, set Timecode insertion to PIC_TIMING_SEI. To + // leave them out, set it to DISABLED. Default is DISABLED. When the service + // inserts timecodes in an output, by default, it uses any embedded timecodes from + // the input. If none are present, the service will set the timecode for the first + // output frame to zero. To change this default behavior, adjust the settings under + // Timecode configuration.
In the console, these settings are located under Job > + // Job settings > Timecode configuration. Note - Timecode source under input + // settings does not affect the timecodes that are inserted in the output. Source + // under Job settings > Timecode configuration does. TimecodeInsertion VideoTimecodeInsertion - // Find additional transcoding features under Preprocessors (VideoPreprocessors). - // Enable the features at each output individually. These features are disabled by - // default. + // Find additional transcoding features under Preprocessors. Enable the features + // at each output individually. These features are disabled by default. VideoPreprocessors *VideoPreprocessor // Use Width to define the video resolution width, in pixels, for this output. To @@ -8025,9 +7649,8 @@ type VideoDetail struct { noSmithyDocumentSerde } -// Find additional transcoding features under Preprocessors (VideoPreprocessors). -// Enable the features at each output individually. These features are disabled by -// default. +// Find additional transcoding features under Preprocessors. Enable the features +// at each output individually. These features are disabled by default. type VideoPreprocessor struct { // Use these settings to convert the color space or to modify properties such as @@ -8046,9 +7669,9 @@ type VideoPreprocessor struct { // Enable HDR10+ analysis and metadata injection. Compatible with HEVC only. Hdr10Plus *Hdr10Plus - // Enable the Image inserter (ImageInserter) feature to include a graphic overlay - // on your video. Enable or disable this feature for each output individually. This - // setting is disabled by default. + // Enable the Image inserter feature to include a graphic overlay on your video. + // Enable or disable this feature for each output individually. This setting is + // disabled by default. ImageInserter *ImageInserter // Enable the Noise reducer feature to remove noise from your video output if @@ -8101,22 +7724,19 @@ type VideoSelector struct { ColorSpace ColorSpace // There are two sources for color metadata, the input file and the job input - // settings Color space (ColorSpace) and HDR master display information - // settings(Hdr10Metadata). The Color space usage setting determines which takes - // precedence. Choose Force (FORCE) to use color metadata from the input job - // settings. If you don't specify values for those settings, the service defaults - // to using metadata from your input. FALLBACK - Choose Fallback (FALLBACK) to use - // color metadata from the source when it is present. If there's no color metadata - // in your input file, the service defaults to using values you specify in the - // input settings. + // settings Color space and HDR master display information settings. The Color + // space usage setting determines which takes precedence. Choose Force to use color + // metadata from the input job settings. If you don't specify values for those + // settings, the service defaults to using metadata from your input. + // Choose Fallback to use color metadata from the source when it is present. If + // there's no color metadata in your input file, the service defaults to using + // values you specify in the input settings. ColorSpaceUsage ColorSpaceUsage - // Set Embedded timecode override (embeddedTimecodeOverride) to Use MDPM - // (USE_MDPM) when your AVCHD input contains timecode tag data in the Modified - // Digital Video Pack Metadata (MDPM).
When you do, we recommend you also set - // Timecode source (inputTimecodeSource) to Embedded (EMBEDDED). Leave Embedded - // timecode override blank, or set to None (NONE), when your input does not contain - // MDPM timecode. + // Set Embedded timecode override to Use MDPM when your AVCHD input contains + // timecode tag data in the Modified Digital Video Pack Metadata. When you do, we + // recommend you also set Timecode source to Embedded. Leave Embedded timecode + // override blank, or set to None, when your input does not contain MDPM timecode. EmbeddedTimecodeOverride EmbeddedTimecodeOverride // Use these settings to provide HDR 10 metadata that is missing or inaccurate in @@ -8124,51 +7744,49 @@ type VideoSelector struct { // be provided by a color grader. The color grader generates these values during // the HDR 10 mastering process. The valid range for each of these settings is 0 to // 50,000. Each increment represents 0.00002 in CIE1931 color coordinate. Related - // settings - When you specify these values, you must also set Color space - // (ColorSpace) to HDR 10 (HDR10). To specify whether the the values you specify - // here take precedence over the values in the metadata of your input file, set - // Color space usage (ColorSpaceUsage). To specify whether color metadata is - // included in an output, set Color metadata (ColorMetadata). For more information - // about MediaConvert HDR jobs, see + // settings - When you specify these values, you must also set Color space to HDR + // 10. To specify whether the values you specify here take precedence over the + // values in the metadata of your input file, set Color space usage. To specify + // whether color metadata is included in an output, set Color metadata. For more + // information about MediaConvert HDR jobs, see // https://docs.aws.amazon.com/console/mediaconvert/hdr. Hdr10Metadata *Hdr10Metadata // Use this setting if your input has video and audio durations that don't align, // and your output or player has strict alignment requirements. Examples: Input // audio track has a delayed start. Input video track ends before audio ends. When - // you set Pad video (padVideo) to Black (BLACK), MediaConvert generates black - // video frames so that output video and audio durations match. Black video frames - // are added at the beginning or end, depending on your input. To keep the default - // behavior and not generate black video, set Pad video to Disabled (DISABLED) or - // leave blank. + // you set Pad video to Black, MediaConvert generates black video frames so that + // output video and audio durations match. Black video frames are added at the + // beginning or end, depending on your input. To keep the default behavior and not + // generate black video, set Pad video to Disabled or leave blank. PadVideo PadVideo - // Use PID (Pid) to select specific video data from an input file. Specify this - // value as an integer; the system automatically converts it to the hexidecimal - // value. For example, 257 selects PID 0x101. A PID, or packet identifier, is an - // identifier for a set of data in an MPEG-2 transport stream container. + // Use PID to select specific video data from an input file. Specify this value as + // an integer; the system automatically converts it to the hexadecimal value. For + // example, 257 selects PID 0x101. A PID, or packet identifier, is an identifier + // for a set of data in an MPEG-2 transport stream container.
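// Illustrative aside, not part of this diff: with the generated types above, selecting video by PID might look like the following minimal sketch. The import path and the literal 257 are assumptions for illustration only.
//
//	// import "github.com/aws/aws-sdk-go-v2/service/mediaconvert/types"
//	selector := types.VideoSelector{
//		Pid: 257, // decimal 257 corresponds to PID 0x101 in the transport stream
//	}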
Pid int32 // Selects a specific program from within a multi-program transport stream. Note // that Quad 4K is not currently supported. ProgramNumber int32 - // Use Rotate (InputRotate) to specify how the service rotates your video. You can - // choose automatic rotation or specify a rotation. You can specify a clockwise - // rotation of 0, 90, 180, or 270 degrees. If your input video container is .mov or - // .mp4 and your input has rotation metadata, you can choose Automatic to have the - // service rotate your video according to the rotation specified in the metadata. - // The rotation must be within one degree of 90, 180, or 270 degrees. If the - // rotation metadata specifies any other rotation, the service will default to no - // rotation. By default, the service does no rotation, even if your input video has - // rotation metadata. The service doesn't pass through rotation metadata. + // Use Rotate to specify how the service rotates your video. You can choose + // automatic rotation or specify a rotation. You can specify a clockwise rotation + // of 0, 90, 180, or 270 degrees. If your input video container is .mov or .mp4 and + // your input has rotation metadata, you can choose Automatic to have the service + // rotate your video according to the rotation specified in the metadata. The + // rotation must be within one degree of 90, 180, or 270 degrees. If the rotation + // metadata specifies any other rotation, the service will default to no rotation. + // By default, the service does no rotation, even if your input video has rotation + // metadata. The service doesn't pass through rotation metadata. Rotate InputRotate // If the sample range metadata in your input video is accurate, or if you don't - // know about sample range, keep the default value, Follow (FOLLOW), for this - // setting. When you do, the service automatically detects your input sample range. - // If your input video has metadata indicating the wrong sample range, specify the - // accurate sample range here. When you do, MediaConvert ignores any sample range + // know about sample range, keep the default value, Follow, for this setting. When + // you do, the service automatically detects your input sample range. If your input + // video has metadata indicating the wrong sample range, specify the accurate + // sample range here. When you do, MediaConvert ignores any sample range // information in the input metadata. Regardless of whether MediaConvert uses the // input sample range or the sample range that you specify, MediaConvert uses the // sample range for transcoding and also writes it to the output metadata. @@ -8199,8 +7817,7 @@ type VorbisSettings struct { noSmithyDocumentSerde } -// Required when you set (Codec) under (VideoDescription)>(CodecSettings) to the -// value VP8. +// Required when you set Codec to the value VP8. type Vp8Settings struct { // Target bitrate in bits/second. For example, enter five megabits per second as @@ -8212,12 +7829,7 @@ type Vp8Settings struct { // video, choose Follow source. If you want to do frame rate conversion, choose a // frame rate from the dropdown list or choose Custom. The framerates shown in the // dropdown list are decimal approximations of fractions. If you choose Custom, - // specify your frame rate as a fraction. If you are creating your transcoding job - // specification as a JSON file without the console, use FramerateControl to - // specify which value the service uses for the frame rate for this output. 
Choose - // INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the - // input. Choose SPECIFIED if you want the service to use the frame rate you - // specify in the settings FramerateNumerator and FramerateDenominator. + // specify your frame rate as a fraction. FramerateControl Vp8FramerateControl // Choose the method that you want MediaConvert to use when increasing or @@ -8263,33 +7875,29 @@ type Vp8Settings struct { MaxBitrate int32 // Optional. Specify how the service determines the pixel aspect ratio (PAR) for - // this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE), uses - // the PAR from your input video for your output. To specify a different PAR in the - // console, choose any value other than Follow source. To specify a different PAR - // by editing the JSON job specification, choose SPECIFIED. When you choose - // SPECIFIED for this setting, you must also specify values for the parNumerator - // and parDenominator settings. + // this output. The default behavior, Follow source, uses the PAR from your input + // video for your output. To specify a different PAR in the console, choose any + // value other than Follow source. When you choose SPECIFIED for this setting, you + // must also specify values for the parNumerator and parDenominator settings. ParControl Vp8ParControl - // Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the - // console, this corresponds to any value other than Follow source. When you - // specify an output pixel aspect ratio (PAR) that is different from your input - // video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC - // widescreen, you would specify the ratio 40:33. In this example, the value for - // parDenominator is 33. + // Required when you set Pixel aspect ratio to SPECIFIED. On the console, this + // corresponds to any value other than Follow source. When you specify an output + // pixel aspect ratio (PAR) that is different from your input video PAR, provide + // your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would + // specify the ratio 40:33. In this example, the value for parDenominator is 33. ParDenominator int32 - // Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the - // console, this corresponds to any value other than Follow source. When you - // specify an output pixel aspect ratio (PAR) that is different from your input - // video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC - // widescreen, you would specify the ratio 40:33. In this example, the value for - // parNumerator is 40. + // Required when you set Pixel aspect ratio to SPECIFIED. On the console, this + // corresponds to any value other than Follow source. When you specify an output + // pixel aspect ratio (PAR) that is different from your input video PAR, provide + // your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would + // specify the ratio 40:33. In this example, the value for parNumerator is 40. ParNumerator int32 - // Optional. Use Quality tuning level (qualityTuningLevel) to choose how you want - // to trade off encoding speed for output video quality. The default behavior is - // faster, lower quality, multi-pass encoding. + // Optional. Use Quality tuning level to choose how you want to trade off encoding + // speed for output video quality. The default behavior is faster, lower quality, + // multi-pass encoding. 
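// Hedged sketch, not part of this diff: the 40:33 D1/DV NTSC widescreen example above, expressed with these generated VP8 types. The constant name Vp8ParControlSpecified is assumed from this package's usual enum codegen.
//
//	vp8 := types.Vp8Settings{
//		ParControl:     types.Vp8ParControlSpecified, // use the PAR below instead of the source PAR
//		ParNumerator:   40,
//		ParDenominator: 33,
//	}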
QualityTuningLevel Vp8QualityTuningLevel // With the VP8 codec, you can use only the variable bitrate (VBR) rate control @@ -8299,8 +7907,7 @@ type Vp8Settings struct { noSmithyDocumentSerde } -// Required when you set (Codec) under (VideoDescription)>(CodecSettings) to the -// value VP9. +// Required when you set Codec to the value VP9. type Vp9Settings struct { // Target bitrate in bits/second. For example, enter five megabits per second as @@ -8312,12 +7919,7 @@ type Vp9Settings struct { // video, choose Follow source. If you want to do frame rate conversion, choose a // frame rate from the dropdown list or choose Custom. The framerates shown in the // dropdown list are decimal approximations of fractions. If you choose Custom, - // specify your frame rate as a fraction. If you are creating your transcoding job - // specification as a JSON file without the console, use FramerateControl to - // specify which value the service uses for the frame rate for this output. Choose - // INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the - // input. Choose SPECIFIED if you want the service to use the frame rate you - // specify in the settings FramerateNumerator and FramerateDenominator. + // specify your frame rate as a fraction. FramerateControl Vp9FramerateControl // Choose the method that you want MediaConvert to use when increasing or @@ -8367,25 +7969,23 @@ type Vp9Settings struct { // video. ParControl Vp9ParControl - // Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the - // console, this corresponds to any value other than Follow source. When you - // specify an output pixel aspect ratio (PAR) that is different from your input - // video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC - // widescreen, you would specify the ratio 40:33. In this example, the value for - // parDenominator is 33. + // Required when you set Pixel aspect ratio to SPECIFIED. On the console, this + // corresponds to any value other than Follow source. When you specify an output + // pixel aspect ratio (PAR) that is different from your input video PAR, provide + // your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would + // specify the ratio 40:33. In this example, the value for parDenominator is 33. ParDenominator int32 - // Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the - // console, this corresponds to any value other than Follow source. When you - // specify an output pixel aspect ratio (PAR) that is different from your input - // video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC - // widescreen, you would specify the ratio 40:33. In this example, the value for - // parNumerator is 40. + // Required when you set Pixel aspect ratio to SPECIFIED. On the console, this + // corresponds to any value other than Follow source. When you specify an output + // pixel aspect ratio (PAR) that is different from your input video PAR, provide + // your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would + // specify the ratio 40:33. In this example, the value for parNumerator is 40. ParNumerator int32 - // Optional. Use Quality tuning level (qualityTuningLevel) to choose how you want - // to trade off encoding speed for output video quality. The default behavior is - // faster, lower quality, multi-pass encoding. + // Optional. Use Quality tuning level to choose how you want to trade off encoding + // speed for output video quality. 
The default behavior is faster, lower quality, + // multi-pass encoding. QualityTuningLevel Vp9QualityTuningLevel // With the VP9 codec, you can use only the variable bitrate (VBR) rate control @@ -8413,12 +8013,11 @@ type WarningGroup struct { noSmithyDocumentSerde } -// Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to the -// value WAV. +// Required when you set Codec to the value WAV. type WavSettings struct { - // Specify Bit depth (BitDepth), in bits per sample, to choose the encoding - // quality for this audio track. + // Specify Bit depth, in bits per sample, to choose the encoding quality for this + // audio track. BitDepth int32 // Specify the number of channels in this output audio track. Valid values are 1 @@ -8441,8 +8040,6 @@ type WavSettings struct { // captions in the same output group, but different output from your video. For // more information, see // https://docs.aws.amazon.com/mediaconvert/latest/ug/ttml-and-webvtt-output-captions.html. -// When you work directly in your JSON job specification, include this object and -// any required children when you set destinationType to WebVTT. type WebvttDestinationSettings struct { // If the WebVTT captions track is intended to provide accessibility for people @@ -8459,14 +8056,14 @@ type WebvttDestinationSettings struct { Accessibility WebvttAccessibilitySubs // To use the available style, color, and position information from your input - // captions: Set Style passthrough (stylePassthrough) to Enabled (ENABLED). - // MediaConvert uses default settings when style and position information is - // missing from your input captions. To recreate the input captions exactly: Set - // Style passthrough to Strict (STRICT). MediaConvert automatically applies timing - // adjustments, including adjustments for frame rate conversion, ad avails, and - // input clipping. Your input captions format must be WebVTT. To ignore the style - // and position information from your input captions and use simplified output - // captions: Set Style passthrough to Disabled (DISABLED), or leave blank. + // captions: Set Style passthrough to Enabled. MediaConvert uses default settings + // when style and position information is missing from your input captions. To + // recreate the input captions exactly: Set Style passthrough to Strict. + // MediaConvert automatically applies timing adjustments, including adjustments for + // frame rate conversion, ad avails, and input clipping. Your input captions format + // must be WebVTT. To ignore the style and position information from your input + // captions and use simplified output captions: Set Style passthrough to Disabled, + // or leave blank. StylePassthrough WebvttStylePassthrough noSmithyDocumentSerde @@ -8494,9 +8091,7 @@ type WebvttHlsSourceSettings struct { noSmithyDocumentSerde } -// Required when you set (Profile) under -// (VideoDescription)>(CodecSettings)>(XavcSettings) to the value -// XAVC_4K_INTRA_CBG. +// Required when you set Profile to the value XAVC_4K_INTRA_CBG. type Xavc4kIntraCbgProfileSettings struct { // Specify the XAVC Intra 4k (CBG) Class to set the bitrate of your output. @@ -8507,9 +8102,7 @@ type Xavc4kIntraCbgProfileSettings struct { noSmithyDocumentSerde } -// Required when you set (Profile) under -// (VideoDescription)>(CodecSettings)>(XavcSettings) to the value -// XAVC_4K_INTRA_VBR. +// Required when you set Profile to the value XAVC_4K_INTRA_VBR. 
type Xavc4kIntraVbrProfileSettings struct { // Specify the XAVC Intra 4k (VBR) Class to set the bitrate of your output. @@ -8520,8 +8113,7 @@ type Xavc4kIntraVbrProfileSettings struct { noSmithyDocumentSerde } -// Required when you set (Profile) under -// (VideoDescription)>(CodecSettings)>(XavcSettings) to the value XAVC_4K. +// Required when you set Profile to the value XAVC_4K. type Xavc4kProfileSettings struct { // Specify the XAVC 4k (Long GOP) Bitrate Class to set the bitrate of your output. @@ -8533,26 +8125,25 @@ type Xavc4kProfileSettings struct { // High, 10-bit, 4:2:2 (HIGH_422). These profiles are specified in ITU-T H.264. CodecProfile Xavc4kProfileCodecProfile - // The best way to set up adaptive quantization is to keep the default value, Auto - // (AUTO), for the setting Adaptive quantization (XavcAdaptiveQuantization). When - // you do so, MediaConvert automatically applies the best types of quantization for - // your video content. Include this setting in your JSON job specification only - // when you choose to change the default value for Adaptive quantization. Enable - // this setting to have the encoder reduce I-frame pop. I-frame pop appears as a - // visual flicker that can arise when the encoder saves bits by copying some - // macroblocks many times from frame to frame, and then refreshes them at the - // I-frame. When you enable this setting, the encoder updates these macroblocks - // slightly more often to smooth out the flicker. This setting is disabled by - // default. Related setting: In addition to enabling this setting, you must also - // set Adaptive quantization (adaptiveQuantization) to a value other than Off (OFF) - // or Auto (AUTO). Use Adaptive quantization to adjust the degree of smoothing that - // Flicker adaptive quantization provides. + // The best way to set up adaptive quantization is to keep the default value, + // Auto, for the setting Adaptive quantization. When you do so, MediaConvert + // automatically applies the best types of quantization for your video content. + // Include this setting in your JSON job specification only when you choose to + // change the default value for Adaptive quantization. Enable this setting to have + // the encoder reduce I-frame pop. I-frame pop appears as a visual flicker that can + // arise when the encoder saves bits by copying some macroblocks many times from + // frame to frame, and then refreshes them at the I-frame. When you enable this + // setting, the encoder updates these macroblocks slightly more often to smooth out + // the flicker. This setting is disabled by default. Related setting: In addition + // to enabling this setting, you must also set Adaptive quantization to a value + // other than Off or Auto. Use Adaptive quantization to adjust the degree of + // smoothing that Flicker adaptive quantization provides. FlickerAdaptiveQuantization XavcFlickerAdaptiveQuantization // Specify whether the encoder uses B-frames as reference frames for other - // pictures in the same GOP. Choose Allow (ENABLED) to allow the encoder to use - // B-frames as reference frames. Choose Don't allow (DISABLED) to prevent the - // encoder from using B-frames as reference frames. + // pictures in the same GOP. Choose Allow to allow the encoder to use B-frames as + // reference frames. Choose Don't allow to prevent the encoder from using B-frames + // as reference frames. GopBReference XavcGopBReference // Frequency of closed GOPs. 
In streaming applications, it is recommended that @@ -8566,9 +8157,9 @@ type Xavc4kProfileSettings struct { // calculates the default by doubling the bitrate of this output point. HrdBufferSize int32 - // Optional. Use Quality tuning level (qualityTuningLevel) to choose how you want - // to trade off encoding speed for output video quality. The default behavior is - // faster, lower quality, single-pass encoding. + // Optional. Use Quality tuning level to choose how you want to trade off encoding + // speed for output video quality. The default behavior is faster, lower quality, + // single-pass encoding. QualityTuningLevel Xavc4kProfileQualityTuningLevel // Number of slices per picture. Must be less than or equal to the number of @@ -8579,9 +8170,7 @@ type Xavc4kProfileSettings struct { noSmithyDocumentSerde } -// Required when you set (Profile) under -// (VideoDescription)>(CodecSettings)>(XavcSettings) to the value -// XAVC_HD_INTRA_CBG. +// Required when you set Profile to the value XAVC_HD_INTRA_CBG. type XavcHdIntraCbgProfileSettings struct { // Specify the XAVC Intra HD (CBG) Class to set the bitrate of your output. @@ -8592,8 +8181,7 @@ type XavcHdIntraCbgProfileSettings struct { noSmithyDocumentSerde } -// Required when you set (Profile) under -// (VideoDescription)>(CodecSettings)>(XavcSettings) to the value XAVC_HD. +// Required when you set Profile to the value XAVC_HD. type XavcHdProfileSettings struct { // Specify the XAVC HD (Long GOP) Bitrate Class to set the bitrate of your output. @@ -8601,26 +8189,25 @@ type XavcHdProfileSettings struct { // that are valid for that class. BitrateClass XavcHdProfileBitrateClass - // The best way to set up adaptive quantization is to keep the default value, Auto - // (AUTO), for the setting Adaptive quantization (XavcAdaptiveQuantization). When - // you do so, MediaConvert automatically applies the best types of quantization for - // your video content. Include this setting in your JSON job specification only - // when you choose to change the default value for Adaptive quantization. Enable - // this setting to have the encoder reduce I-frame pop. I-frame pop appears as a - // visual flicker that can arise when the encoder saves bits by copying some - // macroblocks many times from frame to frame, and then refreshes them at the - // I-frame. When you enable this setting, the encoder updates these macroblocks - // slightly more often to smooth out the flicker. This setting is disabled by - // default. Related setting: In addition to enabling this setting, you must also - // set Adaptive quantization (adaptiveQuantization) to a value other than Off (OFF) - // or Auto (AUTO). Use Adaptive quantization to adjust the degree of smoothing that - // Flicker adaptive quantization provides. + // The best way to set up adaptive quantization is to keep the default value, + // Auto, for the setting Adaptive quantization. When you do so, MediaConvert + // automatically applies the best types of quantization for your video content. + // Include this setting in your JSON job specification only when you choose to + // change the default value for Adaptive quantization. Enable this setting to have + // the encoder reduce I-frame pop. I-frame pop appears as a visual flicker that can + // arise when the encoder saves bits by copying some macroblocks many times from + // frame to frame, and then refreshes them at the I-frame. When you enable this + // setting, the encoder updates these macroblocks slightly more often to smooth out + // the flicker. 
This setting is disabled by default. Related setting: In addition + // to enabling this setting, you must also set Adaptive quantization to a value + // other than Off or Auto. Use Adaptive quantization to adjust the degree of + // smoothing that Flicker adaptive quantization provides. FlickerAdaptiveQuantization XavcFlickerAdaptiveQuantization // Specify whether the encoder uses B-frames as reference frames for other - // pictures in the same GOP. Choose Allow (ENABLED) to allow the encoder to use - // B-frames as reference frames. Choose Don't allow (DISABLED) to prevent the - // encoder from using B-frames as reference frames. + // pictures in the same GOP. Choose Allow to allow the encoder to use B-frames as + // reference frames. Choose Don't allow to prevent the encoder from using B-frames + // as reference frames. GopBReference XavcGopBReference // Frequency of closed GOPs. In streaming applications, it is recommended that @@ -8635,22 +8222,20 @@ type XavcHdProfileSettings struct { HrdBufferSize int32 // Choose the scan line type for the output. Keep the default value, Progressive - // (PROGRESSIVE) to create a progressive output, regardless of the scan type of - // your input. Use Top field first (TOP_FIELD) or Bottom field first (BOTTOM_FIELD) - // to create an output that's interlaced with the same field polarity throughout. - // Use Follow, default top (FOLLOW_TOP_FIELD) or Follow, default bottom - // (FOLLOW_BOTTOM_FIELD) to produce outputs with the same field polarity as the - // source. For jobs that have multiple inputs, the output field polarity might - // change over the course of the output. Follow behavior depends on the input scan - // type. If the source is interlaced, the output will be interlaced with the same - // polarity as the source. If the source is progressive, the output will be - // interlaced with top field bottom field first, depending on which of the Follow - // options you choose. + // to create a progressive output, regardless of the scan type of your input. Use + // Top field first or Bottom field first to create an output that's interlaced with + // the same field polarity throughout. Use Follow, default top or Follow, default + // bottom to produce outputs with the same field polarity as the source. For jobs + // that have multiple inputs, the output field polarity might change over the + // course of the output. Follow behavior depends on the input scan type. If the + // source is interlaced, the output will be interlaced with the same polarity as + // the source. If the source is progressive, the output will be interlaced with top + // field first or bottom field first, depending on which of the Follow options you choose. InterlaceMode XavcInterlaceMode - // Optional. Use Quality tuning level (qualityTuningLevel) to choose how you want - // to trade off encoding speed for output video quality. The default behavior is - // faster, lower quality, single-pass encoding. + // Optional. Use Quality tuning level to choose how you want to trade off encoding + // speed for output video quality. The default behavior is faster, lower quality, + // single-pass encoding. QualityTuningLevel XavcHdProfileQualityTuningLevel // Number of slices per picture. Must be less than or equal to the number of @@ -8659,29 +8244,27 @@ type XavcHdProfileSettings struct { Slices int32 // Ignore this setting unless you set Frame rate (framerateNumerator divided by - // framerateDenominator) to 29.970. If your input framerate is 23.976, choose Hard - // (HARD).
Otherwise, keep the default value None (NONE). For more information, see + // framerateDenominator) to 29.970. If your input framerate is 23.976, choose Hard. + // Otherwise, keep the default value None. For more information, see // https://docs.aws.amazon.com/mediaconvert/latest/ug/working-with-telecine-and-inverse-telecine.html. Telecine XavcHdProfileTelecine noSmithyDocumentSerde } -// Required when you set (Codec) under (VideoDescription)>(CodecSettings) to the -// value XAVC. +// Required when you set Codec to the value XAVC. type XavcSettings struct { - // Keep the default value, Auto (AUTO), for this setting to have MediaConvert + // Keep the default value, Auto, for this setting to have MediaConvert // automatically apply the best types of quantization for your video content. When // you want to apply your quantization settings manually, you must set Adaptive - // quantization (adaptiveQuantization) to a value other than Auto (AUTO). Use this - // setting to specify the strength of any adaptive quantization filters that you - // enable. If you don't want MediaConvert to do any adaptive quantization in this - // transcode, set Adaptive quantization to Off (OFF). Related settings: The value - // that you choose here applies to the following settings: Flicker adaptive - // quantization (flickerAdaptiveQuantization), Spatial adaptive quantization - // (spatialAdaptiveQuantization), and Temporal adaptive quantization - // (temporalAdaptiveQuantization). + // quantization to a value other than Auto. Use this setting to specify the + // strength of any adaptive quantization filters that you enable. If you don't want + // MediaConvert to do any adaptive quantization in this transcode, set Adaptive + // quantization to Off. Related settings: The value that you choose here applies to + // the following settings: Flicker adaptive quantization, Spatial adaptive + // quantization, and Temporal adaptive quantization. AdaptiveQuantization XavcAdaptiveQuantization // Optional. Choose a specific entropy encoding mode only when you want to @@ -8694,12 +8277,7 @@ type XavcSettings struct { // rate for this output. If you want to keep the same frame rate as the input // video, choose Follow source. If you want to do frame rate conversion, choose a // frame rate from the dropdown list. The framerates shown in the dropdown list are - // decimal approximations of fractions. If you are creating your transcoding job - // specification as a JSON file without the console, use FramerateControl to - // specify which value the service uses for the frame rate for this output. Choose - // INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the - // input. Choose SPECIFIED if you want the service to use the frame rate that you - // specify in the settings FramerateNumerator and FramerateDenominator. + // decimal approximations of fractions. FramerateControl XavcFramerateControl // Choose the method that you want MediaConvert to use when increasing or @@ -8741,85 +8319,73 @@ type XavcSettings struct { // second (fps). Enable slow PAL to create a 25 fps output by relabeling the video // frames and resampling your audio. Note that enabling this setting will slightly // reduce the duration of your video. Related settings: You must also set Frame - // rate to 25. In your JSON job specification, set (framerateControl) to - // (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to 1. + // rate to 25.
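// Hedged sketch, not part of this diff: the codec enum / settings object pairing described above, applied to XAVC. The constant names VideoCodecXavc and XavcProfileXavcHd are assumed from this package's usual enum codegen.
//
//	codec := types.VideoCodecSettings{
//		Codec: types.VideoCodecXavc, // codec enum ...
//		XavcSettings: &types.XavcSettings{ // ... paired with its settings object
//			Profile:               types.XavcProfileXavcHd,
//			XavcHdProfileSettings: &types.XavcHdProfileSettings{}, // required for Profile XAVC_HD
//		},
//	}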
SlowPal XavcSlowPal // Ignore this setting unless your downstream workflow requires that you specify // it explicitly. Otherwise, we recommend that you adjust the softness of your - // output by using a lower value for the setting Sharpness (sharpness) or by - // enabling a noise reducer filter (noiseReducerFilter). The Softness (softness) - // setting specifies the quantization matrices that the encoder uses. Keep the - // default value, 0, for flat quantization. Choose the value 1 or 16 to use the - // default JVT softening quantization matricies from the H.264 specification. - // Choose a value from 17 to 128 to use planar interpolation. Increasing values - // from 17 to 128 result in increasing reduction of high-frequency data. The value - // 128 results in the softest video. + // output by using a lower value for the setting Sharpness or by enabling a noise + // reducer filter. The Softness setting specifies the quantization matrices that + // the encoder uses. Keep the default value, 0, for flat quantization. Choose the + // value 1 or 16 to use the default JVT softening quantization matrices from the + // H.264 specification. Choose a value from 17 to 128 to use planar interpolation. + // Increasing values from 17 to 128 result in increasing reduction of + // high-frequency data. The value 128 results in the softest video. Softness int32 - // The best way to set up adaptive quantization is to keep the default value, Auto - // (AUTO), for the setting Adaptive quantization (adaptiveQuantization). When you - // do so, MediaConvert automatically applies the best types of quantization for - // your video content. Include this setting in your JSON job specification only - // when you choose to change the default value for Adaptive quantization. For this - // setting, keep the default value, Enabled (ENABLED), to adjust quantization - // within each frame based on spatial variation of content complexity. When you - // enable this feature, the encoder uses fewer bits on areas that can sustain more - // distortion with no noticeable visual degradation and uses more bits on areas - // where any small distortion will be noticeable. For example, complex textured - // blocks are encoded with fewer bits and smooth textured blocks are encoded with - // more bits. Enabling this feature will almost always improve your video quality. - // Note, though, that this feature doesn't take into account where the viewer's - // attention is likely to be. If viewers are likely to be focusing their attention - // on a part of the screen with a lot of complex texture, you might choose to - // disable this feature. Related setting: When you enable spatial adaptive - // quantization, set the value for Adaptive quantization (adaptiveQuantization) - // depending on your content. For homogeneous content, such as cartoons and video - // games, set it to Low. For content with a wider variety of textures, set it to - // High or Higher. + // The best way to set up adaptive quantization is to keep the default value, + // Auto, for the setting Adaptive quantization. When you do so, MediaConvert + // automatically applies the best types of quantization for your video content. + // Include this setting in your JSON job specification only when you choose to + // change the default value for Adaptive quantization. For this setting, keep the + // default value, Enabled, to adjust quantization within each frame based on + // spatial variation of content complexity.
When you enable this feature, the + // encoder uses fewer bits on areas that can sustain more distortion with no + // noticeable visual degradation and uses more bits on areas where any small + // distortion will be noticeable. For example, complex textured blocks are encoded + // with fewer bits and smooth textured blocks are encoded with more bits. Enabling + // this feature will almost always improve your video quality. Note, though, that + // this feature doesn't take into account where the viewer's attention is likely to + // be. If viewers are likely to be focusing their attention on a part of the screen + // with a lot of complex texture, you might choose to disable this feature. Related + // setting: When you enable spatial adaptive quantization, set the value for + // Adaptive quantization depending on your content. For homogeneous content, such + // as cartoons and video games, set it to Low. For content with a wider variety of + // textures, set it to High or Higher. SpatialAdaptiveQuantization XavcSpatialAdaptiveQuantization - // The best way to set up adaptive quantization is to keep the default value, Auto - // (AUTO), for the setting Adaptive quantization (adaptiveQuantization). When you - // do so, MediaConvert automatically applies the best types of quantization for - // your video content. Include this setting in your JSON job specification only - // when you choose to change the default value for Adaptive quantization. For this - // setting, keep the default value, Enabled (ENABLED), to adjust quantization - // within each frame based on temporal variation of content complexity. When you - // enable this feature, the encoder uses fewer bits on areas of the frame that - // aren't moving and uses more bits on complex objects with sharp edges that move a - // lot. For example, this feature improves the readability of text tickers on - // newscasts and scoreboards on sports matches. Enabling this feature will almost - // always improve your video quality. Note, though, that this feature doesn't take - // into account where the viewer's attention is likely to be. If viewers are likely - // to be focusing their attention on a part of the screen that doesn't have moving - // objects with sharp edges, such as sports athletes' faces, you might choose to - // disable this feature. Related setting: When you enable temporal adaptive - // quantization, adjust the strength of the filter with the setting Adaptive - // quantization (adaptiveQuantization). + // The best way to set up adaptive quantization is to keep the default value, + // Auto, for the setting Adaptive quantization. When you do so, MediaConvert + // automatically applies the best types of quantization for your video content. + // Include this setting in your JSON job specification only when you choose to + // change the default value for Adaptive quantization. For this setting, keep the + // default value, Enabled, to adjust quantization within each frame based on + // temporal variation of content complexity. When you enable this feature, the + // encoder uses fewer bits on areas of the frame that aren't moving and uses more + // bits on complex objects with sharp edges that move a lot. For example, this + // feature improves the readability of text tickers on newscasts and scoreboards on + // sports matches. Enabling this feature will almost always improve your video + // quality. Note, though, that this feature doesn't take into account where the + // viewer's attention is likely to be. 
If viewers are likely to be focusing their + // attention on a part of the screen that doesn't have moving objects with sharp + // edges, such as sports athletes' faces, you might choose to disable this feature. + // Related setting: When you enable temporal adaptive quantization, adjust the + // strength of the filter with the setting Adaptive quantization. TemporalAdaptiveQuantization XavcTemporalAdaptiveQuantization - // Required when you set (Profile) under - // (VideoDescription)>(CodecSettings)>(XavcSettings) to the value - // XAVC_4K_INTRA_CBG. + // Required when you set Profile to the value XAVC_4K_INTRA_CBG. Xavc4kIntraCbgProfileSettings *Xavc4kIntraCbgProfileSettings - // Required when you set (Profile) under - // (VideoDescription)>(CodecSettings)>(XavcSettings) to the value - // XAVC_4K_INTRA_VBR. + // Required when you set Profile to the value XAVC_4K_INTRA_VBR. Xavc4kIntraVbrProfileSettings *Xavc4kIntraVbrProfileSettings - // Required when you set (Profile) under - // (VideoDescription)>(CodecSettings)>(XavcSettings) to the value XAVC_4K. + // Required when you set Profile to the value XAVC_4K. Xavc4kProfileSettings *Xavc4kProfileSettings - // Required when you set (Profile) under - // (VideoDescription)>(CodecSettings)>(XavcSettings) to the value - // XAVC_HD_INTRA_CBG. + // Required when you set Profile to the value XAVC_HD_INTRA_CBG. XavcHdIntraCbgProfileSettings *XavcHdIntraCbgProfileSettings - // Required when you set (Profile) under - // (VideoDescription)>(CodecSettings)>(XavcSettings) to the value XAVC_HD. + // Required when you set Profile to the value XAVC_HD. XavcHdProfileSettings *XavcHdProfileSettings noSmithyDocumentSerde diff --git a/service/omics/doc.go b/service/omics/doc.go index fff35e33a1e..80e40cf482a 100644 --- a/service/omics/doc.go +++ b/service/omics/doc.go @@ -3,7 +3,7 @@ // Package omics provides the API client, operations, and parameter types for // Amazon Omics. // -// This is the Amazon Omics API Reference. For an introduction to the service, see -// What is Amazon Omics? (https://docs.aws.amazon.com/omics/latest/dev/) in the -// Amazon Omics User Guide. +// This is the AWS HealthOmics API Reference. For an introduction to the service, +// see What is AWS HealthOmics? (https://docs.aws.amazon.com/omics/latest/dev/) in +// the AWS HealthOmics User Guide. package omics diff --git a/service/opensearchserverless/types/enums.go b/service/opensearchserverless/types/enums.go index 609b46b6cbb..10ccf232cca 100644 --- a/service/opensearchserverless/types/enums.go +++ b/service/opensearchserverless/types/enums.go @@ -53,6 +53,8 @@ const ( CollectionTypeSearch CollectionType = "SEARCH" // Timeseries collection type CollectionTypeTimeseries CollectionType = "TIMESERIES" + // Vectorsearch collection type + CollectionTypeVectorsearch CollectionType = "VECTORSEARCH" ) // Values returns all known values for CollectionType. 
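// Hedged aside, not part of this diff: creating a collection of the new VECTORSEARCH type might look like the sketch below; the collection name is hypothetical and the input shape is assumed from this package's CreateCollection API.
//
//	// import (
//	//	"github.com/aws/aws-sdk-go-v2/aws"
//	//	"github.com/aws/aws-sdk-go-v2/service/opensearchserverless"
//	//	"github.com/aws/aws-sdk-go-v2/service/opensearchserverless/types"
//	// )
//	out, err := client.CreateCollection(ctx, &opensearchserverless.CreateCollectionInput{
//		Name: aws.String("my-vector-store"), // hypothetical collection name
//		Type: types.CollectionTypeVectorsearch,
//	})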
Note that this can be @@ -62,6 +64,7 @@ func (CollectionType) Values() []CollectionType { return []CollectionType{ "SEARCH", "TIMESERIES", + "VECTORSEARCH", } } diff --git a/service/opensearchserverless/types/errors.go b/service/opensearchserverless/types/errors.go index 8e689d580b3..6475c770b47 100644 --- a/service/opensearchserverless/types/errors.go +++ b/service/opensearchserverless/types/errors.go @@ -61,7 +61,8 @@ func (e *InternalServerException) ErrorCode() string { } func (e *InternalServerException) ErrorFault() smithy.ErrorFault { return smithy.FaultServer } -// OCU Limit Exceeded for service limits +// Thrown when the collection you're attempting to create results in a number of +// search or indexing OCUs that exceeds the account limit. type OcuLimitExceededException struct { Message *string diff --git a/service/polly/types/enums.go b/service/polly/types/enums.go index 72f2162700d..4b0d94be29e 100644 --- a/service/polly/types/enums.go +++ b/service/polly/types/enums.go @@ -79,6 +79,7 @@ const ( LanguageCodeArAe LanguageCode = "ar-AE" LanguageCodeFiFi LanguageCode = "fi-FI" LanguageCodeEnIe LanguageCode = "en-IE" + LanguageCodeNlBe LanguageCode = "nl-BE" ) // Values returns all known values for LanguageCode. Note that this can be @@ -123,6 +124,7 @@ func (LanguageCode) Values() []LanguageCode { "ar-AE", "fi-FI", "en-IE", + "nl-BE", } } @@ -304,6 +306,7 @@ const ( VoiceIdTomoko VoiceId = "Tomoko" VoiceIdNiamh VoiceId = "Niamh" VoiceIdSofie VoiceId = "Sofie" + VoiceIdLisa VoiceId = "Lisa" ) // Values returns all known values for VoiceId. Note that this can be expanded in @@ -401,5 +404,6 @@ func (VoiceId) Values() []VoiceId { "Tomoko", "Niamh", "Sofie", + "Lisa", } } diff --git a/service/route53/api_op_ChangeResourceRecordSets.go b/service/route53/api_op_ChangeResourceRecordSets.go index ee433a8425d..6f04700a465 100644 --- a/service/route53/api_op_ChangeResourceRecordSets.go +++ b/service/route53/api_op_ChangeResourceRecordSets.go @@ -58,10 +58,11 @@ import ( // every kind of resource record set that you can create, delete, or update by // using ChangeResourceRecordSets . Change Propagation to Route 53 DNS Servers When // you submit a ChangeResourceRecordSets request, Route 53 propagates your changes -// to all of the Route 53 authoritative DNS servers. While your changes are -// propagating, GetChange returns a status of PENDING . When propagation is -// complete, GetChange returns a status of INSYNC . Changes generally propagate to -// all Route 53 name servers within 60 seconds. For more information, see GetChange (https://docs.aws.amazon.com/Route53/latest/APIReference/API_GetChange.html) +// to all of the Route 53 authoritative DNS servers managing the hosted zone. While +// your changes are propagating, GetChange returns a status of PENDING . When +// propagation is complete, GetChange returns a status of INSYNC . Changes +// generally propagate to all Route 53 name servers managing the hosted zone within +// 60 seconds. For more information, see GetChange (https://docs.aws.amazon.com/Route53/latest/APIReference/API_GetChange.html) // . Limits on ChangeResourceRecordSets Requests For information about the limits // on a ChangeResourceRecordSets request, see Limits (https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/DNSLimitations.html) // in the Amazon Route 53 Developer Guide. 
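For context on the propagation behavior documented in this hunk, a minimal polling sketch against this package's GetChange API is shown below. The five-second interval, function name, and error handling are illustrative assumptions; GetChangeInput, ChangeInfo, and types.ChangeStatusInsync are the generated shapes the route53 module already exposes.

import (
	"context"
	"time"

	"github.com/aws/aws-sdk-go-v2/service/route53"
	"github.com/aws/aws-sdk-go-v2/service/route53/types"
)

// waitForInsync polls GetChange until the change batch submitted by
// ChangeResourceRecordSets reports INSYNC, i.e. it has propagated to all
// Route 53 DNS servers managing the hosted zone.
func waitForInsync(ctx context.Context, client *route53.Client, changeID *string) error {
	for {
		out, err := client.GetChange(ctx, &route53.GetChangeInput{Id: changeID})
		if err != nil {
			return err
		}
		if out.ChangeInfo.Status == types.ChangeStatusInsync {
			return nil
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(5 * time.Second): // illustrative poll interval
		}
	}
}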
diff --git a/service/route53/api_op_CreateHostedZone.go b/service/route53/api_op_CreateHostedZone.go index d97e0251f10..cb6ba488785 100644 --- a/service/route53/api_op_CreateHostedZone.go +++ b/service/route53/api_op_CreateHostedZone.go @@ -93,7 +93,10 @@ type CreateHostedZoneInput struct { // ID that Amazon Route 53 assigned to the reusable delegation set when you created // it. For more information about reusable delegation sets, see // CreateReusableDelegationSet (https://docs.aws.amazon.com/Route53/latest/APIReference/API_CreateReusableDelegationSet.html) - // . + // . If you are using a reusable delegation set to create a public hosted zone for + // a subdomain, make sure that the parent hosted zone doesn't use one or more of + // the same name servers. If you have overlapping name servers, the operation will + // cause a ConflictingDomainsExist error. DelegationSetId *string // (Optional) A complex type that contains the following optional values: diff --git a/service/route53/api_op_CreateTrafficPolicyInstance.go b/service/route53/api_op_CreateTrafficPolicyInstance.go index c9b33f88e82..5a90e3d25ee 100644 --- a/service/route53/api_op_CreateTrafficPolicyInstance.go +++ b/service/route53/api_op_CreateTrafficPolicyInstance.go @@ -16,7 +16,12 @@ import ( // associates the resource record sets with a specified domain name (such as // example.com) or subdomain name (such as www.example.com). Amazon Route 53 // responds to DNS queries for the domain or subdomain name by using the resource -// record sets that CreateTrafficPolicyInstance created. +// record sets that CreateTrafficPolicyInstance created. After you submit a +// CreateTrafficPolicyInstance request, there's a brief delay while Amazon Route 53 +// creates the resource record sets that are specified in the traffic policy +// definition. Use GetTrafficPolicyInstance with the id of the new traffic policy +// instance to confirm that the CreateTrafficPolicyInstance request completed +// successfully. For more information, see the State response element. func (c *Client) CreateTrafficPolicyInstance(ctx context.Context, params *CreateTrafficPolicyInstanceInput, optFns ...func(*Options)) (*CreateTrafficPolicyInstanceOutput, error) { if params == nil { params = &CreateTrafficPolicyInstanceInput{} diff --git a/service/route53/api_op_GetChange.go b/service/route53/api_op_GetChange.go index 9e7b05fb002..ce48e568c46 100644 --- a/service/route53/api_op_GetChange.go +++ b/service/route53/api_op_GetChange.go @@ -19,10 +19,10 @@ import ( // Returns the current status of a change batch request. The status is one of the // following values: // - PENDING indicates that the changes in this request have not propagated to -// all Amazon Route 53 DNS servers. This is the initial status of all change batch -// requests. +// all Amazon Route 53 DNS servers managing the hosted zone. This is the initial +// status of all change batch requests. // - INSYNC indicates that the changes have propagated to all Route 53 DNS -// servers. +// servers managing the hosted zone.
func (c *Client) GetChange(ctx context.Context, params *GetChangeInput, optFns ...func(*Options)) (*GetChangeOutput, error) { if params == nil { params = &GetChangeInput{} diff --git a/service/route53/api_op_GetTrafficPolicyInstance.go b/service/route53/api_op_GetTrafficPolicyInstance.go index bd1575961ae..a5b3c7e829e 100644 --- a/service/route53/api_op_GetTrafficPolicyInstance.go +++ b/service/route53/api_op_GetTrafficPolicyInstance.go @@ -11,12 +11,11 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Gets information about a specified traffic policy instance. After you submit a -// CreateTrafficPolicyInstance or an UpdateTrafficPolicyInstance request, there's -// a brief delay while Amazon Route 53 creates the resource record sets that are -// specified in the traffic policy definition. For more information, see the State -// response element. In the Route 53 console, traffic policy instances are known as -// policy records. +// Gets information about a specified traffic policy instance. Use +// GetTrafficPolicyInstance with the id of the new traffic policy instance to confirm +// that the CreateTrafficPolicyInstance or UpdateTrafficPolicyInstance request +// completed successfully. For more information, see the State response element. +// In the Route 53 console, traffic policy instances are known as policy records. func (c *Client) GetTrafficPolicyInstance(ctx context.Context, params *GetTrafficPolicyInstanceInput, optFns ...func(*Options)) (*GetTrafficPolicyInstanceOutput, error) { if params == nil { params = &GetTrafficPolicyInstanceInput{} diff --git a/service/route53/api_op_TestDNSAnswer.go b/service/route53/api_op_TestDNSAnswer.go index 101c752257e..d75dcd376a6 100644 --- a/service/route53/api_op_TestDNSAnswer.go +++ b/service/route53/api_op_TestDNSAnswer.go @@ -14,7 +14,10 @@ import ( // Gets the value that Amazon Route 53 returns in response to a DNS request for a // specified record name and type. You can optionally specify the IP address of a // DNS resolver, an EDNS0 client subnet IP address, and a subnet mask. This call -// only supports querying public hosted zones. +// only supports querying public hosted zones. TestDNSAnswer returns +// information similar to what you would expect from the answer section of the dig +// command. Therefore, if you query for the name servers of a subdomain that point +// to the parent name servers, those will not be returned. func (c *Client) TestDNSAnswer(ctx context.Context, params *TestDNSAnswerInput, optFns ...func(*Options)) (*TestDNSAnswerOutput, error) { if params == nil { params = &TestDNSAnswerInput{} diff --git a/service/route53/api_op_UpdateTrafficPolicyInstance.go b/service/route53/api_op_UpdateTrafficPolicyInstance.go index b7ef498bcce..ec7665c4f72 100644 --- a/service/route53/api_op_UpdateTrafficPolicyInstance.go +++ b/service/route53/api_op_UpdateTrafficPolicyInstance.go @@ -11,6 +11,11 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) +// After you submit an UpdateTrafficPolicyInstance request, there's a brief delay +// while Route 53 creates the resource record sets that are specified in the +// traffic policy definition. Use GetTrafficPolicyInstance with the id of the updated +// traffic policy instance to confirm that the UpdateTrafficPolicyInstance request +// completed successfully. For more information, see the State response element. // Updates the resource record sets in a specified hosted zone that were created // based on the settings in a specified traffic policy version.
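The updated doc comments for GetTrafficPolicyInstance and UpdateTrafficPolicyInstance describe a confirm-by-State workflow. A sketch of that polling, assuming the State values documented by Route 53 ("Applied", "Creating", "Failed"), a three-second poll interval, and an illustrative package name:

package route53util

import (
	"context"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/route53"
)

// confirmApplied polls GetTrafficPolicyInstance until the State element
// reports the outcome of a Create/UpdateTrafficPolicyInstance request.
func confirmApplied(ctx context.Context, client *route53.Client, instanceID string) error {
	for {
		out, err := client.GetTrafficPolicyInstance(ctx, &route53.GetTrafficPolicyInstanceInput{
			Id: aws.String(instanceID),
		})
		if err != nil {
			return err
		}
		// State is a plain string: "Applied" once the record sets exist,
		// "Creating" while Route 53 is still working, "Failed" on error.
		switch aws.ToString(out.TrafficPolicyInstance.State) {
		case "Applied":
			return nil
		case "Failed":
			return fmt.Errorf("traffic policy instance %s failed", instanceID)
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(3 * time.Second):
		}
	}
}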
When you update a // traffic policy instance, Amazon Route 53 continues to respond to DNS queries for diff --git a/service/servicediscovery/internal/endpoints/endpoints.go b/service/servicediscovery/internal/endpoints/endpoints.go index 4a320d8846d..35906e6d60c 100644 --- a/service/servicediscovery/internal/endpoints/endpoints.go +++ b/service/servicediscovery/internal/endpoints/endpoints.go @@ -146,7 +146,7 @@ var defaultPartitions = endpoints.Partitions{ Region: "af-south-1", Variant: endpoints.DualStackVariant, }: { - Hostname: "servicediscovery.af-south-1.amazonaws.com", + Hostname: "servicediscovery.af-south-1.api.aws", }, endpoints.EndpointKey{ Region: "ap-east-1", @@ -155,7 +155,7 @@ var defaultPartitions = endpoints.Partitions{ Region: "ap-east-1", Variant: endpoints.DualStackVariant, }: { - Hostname: "servicediscovery.ap-east-1.amazonaws.com", + Hostname: "servicediscovery.ap-east-1.api.aws", }, endpoints.EndpointKey{ Region: "ap-northeast-1", @@ -164,7 +164,7 @@ var defaultPartitions = endpoints.Partitions{ Region: "ap-northeast-1", Variant: endpoints.DualStackVariant, }: { - Hostname: "servicediscovery.ap-northeast-1.amazonaws.com", + Hostname: "servicediscovery.ap-northeast-1.api.aws", }, endpoints.EndpointKey{ Region: "ap-northeast-2", @@ -173,7 +173,7 @@ var defaultPartitions = endpoints.Partitions{ Region: "ap-northeast-2", Variant: endpoints.DualStackVariant, }: { - Hostname: "servicediscovery.ap-northeast-2.amazonaws.com", + Hostname: "servicediscovery.ap-northeast-2.api.aws", }, endpoints.EndpointKey{ Region: "ap-northeast-3", @@ -182,7 +182,7 @@ var defaultPartitions = endpoints.Partitions{ Region: "ap-northeast-3", Variant: endpoints.DualStackVariant, }: { - Hostname: "servicediscovery.ap-northeast-3.amazonaws.com", + Hostname: "servicediscovery.ap-northeast-3.api.aws", }, endpoints.EndpointKey{ Region: "ap-south-1", @@ -191,7 +191,7 @@ var defaultPartitions = endpoints.Partitions{ Region: "ap-south-1", Variant: endpoints.DualStackVariant, }: { - Hostname: "servicediscovery.ap-south-1.amazonaws.com", + Hostname: "servicediscovery.ap-south-1.api.aws", }, endpoints.EndpointKey{ Region: "ap-south-2", @@ -200,7 +200,7 @@ var defaultPartitions = endpoints.Partitions{ Region: "ap-south-2", Variant: endpoints.DualStackVariant, }: { - Hostname: "servicediscovery.ap-south-2.amazonaws.com", + Hostname: "servicediscovery.ap-south-2.api.aws", }, endpoints.EndpointKey{ Region: "ap-southeast-1", @@ -209,7 +209,7 @@ var defaultPartitions = endpoints.Partitions{ Region: "ap-southeast-1", Variant: endpoints.DualStackVariant, }: { - Hostname: "servicediscovery.ap-southeast-1.amazonaws.com", + Hostname: "servicediscovery.ap-southeast-1.api.aws", }, endpoints.EndpointKey{ Region: "ap-southeast-2", @@ -218,7 +218,7 @@ var defaultPartitions = endpoints.Partitions{ Region: "ap-southeast-2", Variant: endpoints.DualStackVariant, }: { - Hostname: "servicediscovery.ap-southeast-2.amazonaws.com", + Hostname: "servicediscovery.ap-southeast-2.api.aws", }, endpoints.EndpointKey{ Region: "ap-southeast-3", @@ -227,7 +227,7 @@ var defaultPartitions = endpoints.Partitions{ Region: "ap-southeast-3", Variant: endpoints.DualStackVariant, }: { - Hostname: "servicediscovery.ap-southeast-3.amazonaws.com", + Hostname: "servicediscovery.ap-southeast-3.api.aws", }, endpoints.EndpointKey{ Region: "ap-southeast-4", @@ -236,7 +236,7 @@ var defaultPartitions = endpoints.Partitions{ Region: "ap-southeast-4", Variant: endpoints.DualStackVariant, }: { - Hostname: "servicediscovery.ap-southeast-4.amazonaws.com", + 
Hostname: "servicediscovery.ap-southeast-4.api.aws", }, endpoints.EndpointKey{ Region: "ca-central-1", @@ -247,11 +247,17 @@ var defaultPartitions = endpoints.Partitions{ }: { Hostname: "servicediscovery-fips.ca-central-1.amazonaws.com", }, + endpoints.EndpointKey{ + Region: "ca-central-1", + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "servicediscovery-fips.ca-central-1.api.aws", + }, endpoints.EndpointKey{ Region: "ca-central-1", Variant: endpoints.DualStackVariant, }: { - Hostname: "servicediscovery.ca-central-1.amazonaws.com", + Hostname: "servicediscovery.ca-central-1.api.aws", }, endpoints.EndpointKey{ Region: "ca-central-1-fips", @@ -269,7 +275,7 @@ var defaultPartitions = endpoints.Partitions{ Region: "eu-central-1", Variant: endpoints.DualStackVariant, }: { - Hostname: "servicediscovery.eu-central-1.amazonaws.com", + Hostname: "servicediscovery.eu-central-1.api.aws", }, endpoints.EndpointKey{ Region: "eu-central-2", @@ -278,7 +284,7 @@ var defaultPartitions = endpoints.Partitions{ Region: "eu-central-2", Variant: endpoints.DualStackVariant, }: { - Hostname: "servicediscovery.eu-central-2.amazonaws.com", + Hostname: "servicediscovery.eu-central-2.api.aws", }, endpoints.EndpointKey{ Region: "eu-north-1", @@ -287,7 +293,7 @@ var defaultPartitions = endpoints.Partitions{ Region: "eu-north-1", Variant: endpoints.DualStackVariant, }: { - Hostname: "servicediscovery.eu-north-1.amazonaws.com", + Hostname: "servicediscovery.eu-north-1.api.aws", }, endpoints.EndpointKey{ Region: "eu-south-1", @@ -296,7 +302,7 @@ var defaultPartitions = endpoints.Partitions{ Region: "eu-south-1", Variant: endpoints.DualStackVariant, }: { - Hostname: "servicediscovery.eu-south-1.amazonaws.com", + Hostname: "servicediscovery.eu-south-1.api.aws", }, endpoints.EndpointKey{ Region: "eu-south-2", @@ -305,7 +311,7 @@ var defaultPartitions = endpoints.Partitions{ Region: "eu-south-2", Variant: endpoints.DualStackVariant, }: { - Hostname: "servicediscovery.eu-south-2.amazonaws.com", + Hostname: "servicediscovery.eu-south-2.api.aws", }, endpoints.EndpointKey{ Region: "eu-west-1", @@ -314,7 +320,7 @@ var defaultPartitions = endpoints.Partitions{ Region: "eu-west-1", Variant: endpoints.DualStackVariant, }: { - Hostname: "servicediscovery.eu-west-1.amazonaws.com", + Hostname: "servicediscovery.eu-west-1.api.aws", }, endpoints.EndpointKey{ Region: "eu-west-2", @@ -323,7 +329,7 @@ var defaultPartitions = endpoints.Partitions{ Region: "eu-west-2", Variant: endpoints.DualStackVariant, }: { - Hostname: "servicediscovery.eu-west-2.amazonaws.com", + Hostname: "servicediscovery.eu-west-2.api.aws", }, endpoints.EndpointKey{ Region: "eu-west-3", @@ -332,7 +338,7 @@ var defaultPartitions = endpoints.Partitions{ Region: "eu-west-3", Variant: endpoints.DualStackVariant, }: { - Hostname: "servicediscovery.eu-west-3.amazonaws.com", + Hostname: "servicediscovery.eu-west-3.api.aws", }, endpoints.EndpointKey{ Region: "me-central-1", @@ -341,7 +347,7 @@ var defaultPartitions = endpoints.Partitions{ Region: "me-central-1", Variant: endpoints.DualStackVariant, }: { - Hostname: "servicediscovery.me-central-1.amazonaws.com", + Hostname: "servicediscovery.me-central-1.api.aws", }, endpoints.EndpointKey{ Region: "me-south-1", @@ -350,7 +356,7 @@ var defaultPartitions = endpoints.Partitions{ Region: "me-south-1", Variant: endpoints.DualStackVariant, }: { - Hostname: "servicediscovery.me-south-1.amazonaws.com", + Hostname: "servicediscovery.me-south-1.api.aws", }, endpoints.EndpointKey{ Region: "sa-east-1", @@ 
-359,7 +365,7 @@ var defaultPartitions = endpoints.Partitions{ Region: "sa-east-1", Variant: endpoints.DualStackVariant, }: { - Hostname: "servicediscovery.sa-east-1.amazonaws.com", + Hostname: "servicediscovery.sa-east-1.api.aws", }, endpoints.EndpointKey{ Region: "us-east-1", @@ -370,11 +376,17 @@ var defaultPartitions = endpoints.Partitions{ }: { Hostname: "servicediscovery-fips.us-east-1.amazonaws.com", }, + endpoints.EndpointKey{ + Region: "us-east-1", + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "servicediscovery-fips.us-east-1.api.aws", + }, endpoints.EndpointKey{ Region: "us-east-1", Variant: endpoints.DualStackVariant, }: { - Hostname: "servicediscovery.us-east-1.amazonaws.com", + Hostname: "servicediscovery.us-east-1.api.aws", }, endpoints.EndpointKey{ Region: "us-east-1-fips", @@ -394,11 +406,17 @@ var defaultPartitions = endpoints.Partitions{ }: { Hostname: "servicediscovery-fips.us-east-2.amazonaws.com", }, + endpoints.EndpointKey{ + Region: "us-east-2", + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "servicediscovery-fips.us-east-2.api.aws", + }, endpoints.EndpointKey{ Region: "us-east-2", Variant: endpoints.DualStackVariant, }: { - Hostname: "servicediscovery.us-east-2.amazonaws.com", + Hostname: "servicediscovery.us-east-2.api.aws", }, endpoints.EndpointKey{ Region: "us-east-2-fips", @@ -418,11 +436,17 @@ var defaultPartitions = endpoints.Partitions{ }: { Hostname: "servicediscovery-fips.us-west-1.amazonaws.com", }, + endpoints.EndpointKey{ + Region: "us-west-1", + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "servicediscovery-fips.us-west-1.api.aws", + }, endpoints.EndpointKey{ Region: "us-west-1", Variant: endpoints.DualStackVariant, }: { - Hostname: "servicediscovery.us-west-1.amazonaws.com", + Hostname: "servicediscovery.us-west-1.api.aws", }, endpoints.EndpointKey{ Region: "us-west-1-fips", @@ -442,11 +466,17 @@ var defaultPartitions = endpoints.Partitions{ }: { Hostname: "servicediscovery-fips.us-west-2.amazonaws.com", }, + endpoints.EndpointKey{ + Region: "us-west-2", + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "servicediscovery-fips.us-west-2.api.aws", + }, endpoints.EndpointKey{ Region: "us-west-2", Variant: endpoints.DualStackVariant, }: { - Hostname: "servicediscovery.us-west-2.amazonaws.com", + Hostname: "servicediscovery.us-west-2.api.aws", }, endpoints.EndpointKey{ Region: "us-west-2-fips", @@ -501,7 +531,7 @@ var defaultPartitions = endpoints.Partitions{ Region: "cn-north-1", Variant: endpoints.DualStackVariant, }: { - Hostname: "servicediscovery.cn-north-1.amazonaws.com.cn", + Hostname: "servicediscovery.cn-north-1.api.amazonwebservices.com.cn", }, endpoints.EndpointKey{ Region: "cn-northwest-1", @@ -510,7 +540,7 @@ var defaultPartitions = endpoints.Partitions{ Region: "cn-northwest-1", Variant: endpoints.DualStackVariant, }: { - Hostname: "servicediscovery.cn-northwest-1.amazonaws.com.cn", + Hostname: "servicediscovery.cn-northwest-1.api.amazonwebservices.com.cn", }, }, }, @@ -663,6 +693,12 @@ var defaultPartitions = endpoints.Partitions{ endpoints.EndpointKey{ Region: "us-gov-east-1", }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-gov-east-1", + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "servicediscovery-fips.us-gov-east-1.amazonaws.com", + }, endpoints.EndpointKey{ Region: "us-gov-east-1", Variant: endpoints.FIPSVariant, @@ -687,6 +723,12 @@ var 
defaultPartitions = endpoints.Partitions{ endpoints.EndpointKey{ Region: "us-gov-west-1", }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-gov-west-1", + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "servicediscovery-fips.us-gov-west-1.amazonaws.com", + }, endpoints.EndpointKey{ Region: "us-gov-west-1", Variant: endpoints.FIPSVariant,
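The endpoint table changes above only affect clients that opt in to the dual-stack (and, where the new combined entries exist, FIPS) variants; by default the SDK keeps resolving the classic amazonaws.com hostnames. A minimal sketch of opting in, assuming us-east-1:

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/servicediscovery"
)

func main() {
	// With dual-stack enabled, the resolver now returns e.g.
	// servicediscovery.us-east-1.api.aws; with FIPS also enabled, the new
	// combined entry servicediscovery-fips.us-east-1.api.aws is used.
	cfg, err := config.LoadDefaultConfig(context.TODO(),
		config.WithRegion("us-east-1"),
		config.WithUseDualStackEndpoint(aws.DualStackEndpointStateEnabled),
		config.WithUseFIPSEndpoint(aws.FIPSEndpointStateEnabled),
	)
	if err != nil {
		log.Fatal(err)
	}
	_ = servicediscovery.NewFromConfig(cfg)
}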