From d1c997498fd43019b43d52fcd74e402e16630636 Mon Sep 17 00:00:00 2001
From: aws-sdk-go-automation
<43143561+aws-sdk-go-automation@users.noreply.github.com>
Date: Wed, 14 Jul 2021 11:16:39 -0700
Subject: [PATCH] Release v1.40.0 (2021-07-14) (#4007)
Release v1.40.0 (2021-07-14)
===
### Service Client Updates
* `service/acm`: Updates service API and documentation
* Added support for RSA 3072 SSL certificate import
* `service/dms`: Updates service API and documentation
* Release of feature needed for ECA-Endpoint settings. This allows customer to delete a field in endpoint settings by using --exact-settings flag in modify-endpoint api. This also displays default values for certain required fields of endpoint settings in describe-endpoint-settings api.
* `service/glue`: Updates service API and documentation
* Add support for Event Driven Workflows
* `service/healthlake`: Updates service API, documentation, and paginators
* `service/lightsail`: Updates service API and documentation
* This release adds support for the Amazon Lightsail object storage service, which allows you to create buckets and store objects.
* `service/wellarchitected`: Updates service API and documentation
### SDK Features
* `aws/session`: Support has been added for EC2 IPv6-enabled Instance Metadata Service Endpoints ([#4006](https://github.com/aws/aws-sdk-go/pull/4006))
* Adds support for `AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE` environment variable which may specify `IPv6` or `IPv4` for selecting the desired endpoint.
 * Adds support for `ec2_metadata_service_endpoint_mode` AWS profile key, which may specify `IPv6` or `IPv4` for selecting the desired endpoint. Has lower precedence than `AWS_EC2_METADATA_SERVICE_ENDPOINT`.
 * Adds support for `ec2_metadata_service_endpoint` AWS profile key, which may specify an explicit endpoint URI. Has higher precedence than `ec2_metadata_service_endpoint_mode`.
* `aws/endpoints`: Support has been added for EC2 IPv6-enabled Instance Metadata Service Endpoints ([#4006](https://github.com/aws/aws-sdk-go/pull/4006))
---
CHANGELOG.md | 22 +
CHANGELOG_PENDING.md | 5 -
aws/version.go | 2 +-
models/apis/acm/2015-12-08/api-2.json | 3 +-
models/apis/acm/2015-12-08/docs-2.json | 32 +-
models/apis/dms/2016-01-01/api-2.json | 28 +-
models/apis/dms/2016-01-01/docs-2.json | 427 +--
models/apis/glue/2017-03-31/api-2.json | 41 +-
models/apis/glue/2017-03-31/docs-2.json | 38 +-
models/apis/healthlake/2017-07-01/api-2.json | 258 +-
models/apis/healthlake/2017-07-01/docs-2.json | 178 +-
.../healthlake/2017-07-01/paginators-1.json | 10 +
models/apis/lightsail/2016-11-28/api-2.json | 498 ++-
models/apis/lightsail/2016-11-28/docs-2.json | 486 ++-
.../wellarchitected/2020-03-31/api-2.json | 83 +-
.../wellarchitected/2020-03-31/docs-2.json | 78 +-
service/acm/api.go | 99 +-
service/acm/doc.go | 7 +-
service/acm/errors.go | 7 +-
service/databasemigrationservice/api.go | 1288 ++++---
service/databasemigrationservice/doc.go | 20 +-
service/databasemigrationservice/errors.go | 14 +-
service/glue/api.go | 162 +-
service/healthlake/api.go | 2838 +++++++++++----
.../healthlake/healthlakeiface/interface.go | 26 +
service/lightsail/api.go | 3159 ++++++++++++++++-
service/lightsail/doc.go | 11 +-
service/lightsail/lightsailiface/interface.go | 44 +
service/wellarchitected/api.go | 304 +-
29 files changed, 8259 insertions(+), 1909 deletions(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 82b2c5fc039..bd147312335 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,25 @@
+Release v1.40.0 (2021-07-14)
+===
+
+### Service Client Updates
+* `service/acm`: Updates service API and documentation
+ * Added support for RSA 3072 SSL certificate import
+* `service/dms`: Updates service API and documentation
+ * Release of feature needed for ECA-Endpoint settings. This allows customer to delete a field in endpoint settings by using --exact-settings flag in modify-endpoint api. This also displays default values for certain required fields of endpoint settings in describe-endpoint-settings api.
+* `service/glue`: Updates service API and documentation
+ * Add support for Event Driven Workflows
+* `service/healthlake`: Updates service API, documentation, and paginators
+* `service/lightsail`: Updates service API and documentation
+ * This release adds support for the Amazon Lightsail object storage service, which allows you to create buckets and store objects.
+* `service/wellarchitected`: Updates service API and documentation
+
+### SDK Features
+* `aws/session`: Support has been added for EC2 IPv6-enabled Instance Metadata Service Endpoints ([#4006](https://github.com/aws/aws-sdk-go/pull/4006))
+ * Adds support for `AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE` environment variable which may specify `IPv6` or `IPv4` for selecting the desired endpoint.
+ * Adds support for `ec2_metadata_service_endpoint_mode` AWS profile key, which may specify `IPv6` or `IPv4` for selecting the desired endpoint. Has lower precedence than `AWS_EC2_METADATA_SERVICE_ENDPOINT`.
+ * Adds support for `ec2_metadata_service_endpoint` AWS profile key, which may specify an explicit endpoint URI. Has higher precedence than `ec2_metadata_service_endpoint_mode`.
+* `aws/endpoints`: Support has been added for EC2 IPv6-enabled Instance Metadata Service Endpoints ([#4006](https://github.com/aws/aws-sdk-go/pull/4006))
+
Release v1.39.6 (2021-07-13)
===
diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md
index 65369076006..8a1927a39ca 100644
--- a/CHANGELOG_PENDING.md
+++ b/CHANGELOG_PENDING.md
@@ -1,9 +1,4 @@
### SDK Features
-* `aws/session`: Support has been added for EC2 IPv6-enabled Instance Metadata Service Endpoints ([#4006](https://github.com/aws/aws-sdk-go/pull/4006))
- * Adds support for `AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE` environment variable which may specify `IPv6` or `IPv4` for selecting the desired endpoint.
- * Adds support for `ec2_metadata_service_endpoint_mode` AWS profile key, which may specify `IPv6` or `IPv4` for selecting the desired endpoint. Has lower precedence then `AWS_EC2_METADATA_SERVICE_ENDPOINT`.
- * Adds support for `ec2_metadata_service_endpoint` AWS profile key, which may specify an explicit endpoint URI. Has higher precedence then `ec2_metadata_service_endpoint_mode`.
-* `aws/endpoints`: Supported has been added for EC2 IPv6-enabled Instance Metadata Service Endpoints ([#4006](https://github.com/aws/aws-sdk-go/pull/4006))
### SDK Enhancements
diff --git a/aws/version.go b/aws/version.go
index fdf6d86d4b4..67d08198ebb 100644
--- a/aws/version.go
+++ b/aws/version.go
@@ -5,4 +5,4 @@ package aws
const SDKName = "aws-sdk-go"
// SDKVersion is the version of this SDK
-const SDKVersion = "1.39.6"
+const SDKVersion = "1.40.0"
diff --git a/models/apis/acm/2015-12-08/api-2.json b/models/apis/acm/2015-12-08/api-2.json
index d37e33b9f19..2602c2ac34d 100644
--- a/models/apis/acm/2015-12-08/api-2.json
+++ b/models/apis/acm/2015-12-08/api-2.json
@@ -620,8 +620,9 @@
"KeyAlgorithm":{
"type":"string",
"enum":[
- "RSA_2048",
"RSA_1024",
+ "RSA_2048",
+ "RSA_3072",
"RSA_4096",
"EC_prime256v1",
"EC_secp384r1",
diff --git a/models/apis/acm/2015-12-08/docs-2.json b/models/apis/acm/2015-12-08/docs-2.json
index e5c3f814868..1e89a61ea14 100644
--- a/models/apis/acm/2015-12-08/docs-2.json
+++ b/models/apis/acm/2015-12-08/docs-2.json
@@ -1,20 +1,20 @@
{
"version": "2.0",
- "service": "
You can use AWS Certificate Manager (ACM) to manage SSL/TLS certificates for your AWS-based websites and applications. For more information about using ACM, see the AWS Certificate Manager User Guide.
", + "service": "You can use Amazon Web Services Certificate Manager (ACM) to manage SSL/TLS certificates for your Amazon Web Services-based websites and applications. For more information about using ACM, see the Amazon Web Services Certificate Manager User Guide.
", "operations": { - "AddTagsToCertificate": "Adds one or more tags to an ACM certificate. Tags are labels that you can use to identify and organize your AWS resources. Each tag consists of a key
and an optional value
. You specify the certificate on input by its Amazon Resource Name (ARN). You specify the tag by using a key-value pair.
You can apply a tag to just one certificate if you want to identify a specific characteristic of that certificate, or you can apply the same tag to multiple certificates if you want to filter for a common relationship among those certificates. Similarly, you can apply the same tag to multiple resources if you want to specify a relationship among those resources. For example, you can add the same tag to an ACM certificate and an Elastic Load Balancing load balancer to indicate that they are both used by the same website. For more information, see Tagging ACM certificates.
To remove one or more tags, use the RemoveTagsFromCertificate action. To view all of the tags that have been applied to the certificate, use the ListTagsForCertificate action.
", - "DeleteCertificate": "Deletes a certificate and its associated private key. If this action succeeds, the certificate no longer appears in the list that can be displayed by calling the ListCertificates action or be retrieved by calling the GetCertificate action. The certificate will not be available for use by AWS services integrated with ACM.
You cannot delete an ACM certificate that is being used by another AWS service. To delete a certificate that is in use, the certificate association must first be removed.
Adds one or more tags to an ACM certificate. Tags are labels that you can use to identify and organize your Amazon Web Services resources. Each tag consists of a key
and an optional value
. You specify the certificate on input by its Amazon Resource Name (ARN). You specify the tag by using a key-value pair.
You can apply a tag to just one certificate if you want to identify a specific characteristic of that certificate, or you can apply the same tag to multiple certificates if you want to filter for a common relationship among those certificates. Similarly, you can apply the same tag to multiple resources if you want to specify a relationship among those resources. For example, you can add the same tag to an ACM certificate and an Elastic Load Balancing load balancer to indicate that they are both used by the same website. For more information, see Tagging ACM certificates.
To remove one or more tags, use the RemoveTagsFromCertificate action. To view all of the tags that have been applied to the certificate, use the ListTagsForCertificate action.
", + "DeleteCertificate": "Deletes a certificate and its associated private key. If this action succeeds, the certificate no longer appears in the list that can be displayed by calling the ListCertificates action or be retrieved by calling the GetCertificate action. The certificate will not be available for use by Amazon Web Services services integrated with ACM.
You cannot delete an ACM certificate that is being used by another Amazon Web Services service. To delete a certificate that is in use, the certificate association must first be removed.
Returns detailed metadata about the specified ACM certificate.
", "ExportCertificate": "Exports a private certificate issued by a private certificate authority (CA) for use anywhere. The exported file contains the certificate, the certificate chain, and the encrypted private 2048-bit RSA key associated with the public key that is embedded in the certificate. For security, you must assign a passphrase for the private key when exporting it.
For information about exporting and formatting a certificate using the ACM console or CLI, see Export a Private Certificate.
", - "GetAccountConfiguration": "Returns the account configuration options associated with an AWS account.
", + "GetAccountConfiguration": "Returns the account configuration options associated with an Amazon Web Services account.
", "GetCertificate": "Retrieves an Amazon-issued certificate and its certificate chain. The chain consists of the certificate of the issuing CA and the intermediate certificates of any other subordinate CAs. All of the certificates are base64 encoded. You can use OpenSSL to decode the certificates and inspect individual fields.
", - "ImportCertificate": "Imports a certificate into AWS Certificate Manager (ACM) to use with services that are integrated with ACM. Note that integrated services allow only certificate types and keys they support to be associated with their resources. Further, their support differs depending on whether the certificate is imported into IAM or into ACM. For more information, see the documentation for each service. For more information about importing certificates into ACM, see Importing Certificates in the AWS Certificate Manager User Guide.
ACM does not provide managed renewal for certificates that you import.
Note the following guidelines when importing third party certificates:
You must enter the private key that matches the certificate you are importing.
The private key must be unencrypted. You cannot import a private key that is protected by a password or a passphrase.
The private key must be no larger than 5 KB (5,120 bytes).
If the certificate you are importing is not self-signed, you must enter its certificate chain.
If a certificate chain is included, the issuer must be the subject of one of the certificates in the chain.
The certificate, private key, and certificate chain must be PEM-encoded.
The current time must be between the Not Before
and Not After
certificate fields.
The Issuer
field must not be empty.
The OCSP authority URL, if present, must not exceed 1000 characters.
To import a new certificate, omit the CertificateArn
argument. Include this argument only when you want to replace a previously imported certificate.
When you import a certificate by using the CLI, you must specify the certificate, the certificate chain, and the private key by their file names preceded by fileb://
. For example, you can specify a certificate saved in the C:\\temp
folder as fileb://C:\\temp\\certificate_to_import.pem
. If you are making an HTTP or HTTPS Query request, include these arguments as BLOBs.
When you import a certificate by using an SDK, you must specify the certificate, the certificate chain, and the private key files in the manner required by the programming language you're using.
The cryptographic algorithm of an imported certificate must match the algorithm of the signing CA. For example, if the signing CA key type is RSA, then the certificate key type must also be RSA.
This operation returns the Amazon Resource Name (ARN) of the imported certificate.
", + "ImportCertificate": "Imports a certificate into Amazon Web Services Certificate Manager (ACM) to use with services that are integrated with ACM. Note that integrated services allow only certificate types and keys they support to be associated with their resources. Further, their support differs depending on whether the certificate is imported into IAM or into ACM. For more information, see the documentation for each service. For more information about importing certificates into ACM, see Importing Certificates in the Amazon Web Services Certificate Manager User Guide.
ACM does not provide managed renewal for certificates that you import.
Note the following guidelines when importing third party certificates:
You must enter the private key that matches the certificate you are importing.
The private key must be unencrypted. You cannot import a private key that is protected by a password or a passphrase.
The private key must be no larger than 5 KB (5,120 bytes).
If the certificate you are importing is not self-signed, you must enter its certificate chain.
If a certificate chain is included, the issuer must be the subject of one of the certificates in the chain.
The certificate, private key, and certificate chain must be PEM-encoded.
The current time must be between the Not Before
and Not After
certificate fields.
The Issuer
field must not be empty.
The OCSP authority URL, if present, must not exceed 1000 characters.
To import a new certificate, omit the CertificateArn
argument. Include this argument only when you want to replace a previously imported certificate.
When you import a certificate by using the CLI, you must specify the certificate, the certificate chain, and the private key by their file names preceded by fileb://
. For example, you can specify a certificate saved in the C:\\temp
folder as fileb://C:\\temp\\certificate_to_import.pem
. If you are making an HTTP or HTTPS Query request, include these arguments as BLOBs.
When you import a certificate by using an SDK, you must specify the certificate, the certificate chain, and the private key files in the manner required by the programming language you're using.
The cryptographic algorithm of an imported certificate must match the algorithm of the signing CA. For example, if the signing CA key type is RSA, then the certificate key type must also be RSA.
This operation returns the Amazon Resource Name (ARN) of the imported certificate.
", "ListCertificates": "Retrieves a list of certificate ARNs and domain names. You can request that only certificates that match a specific status be listed. You can also filter by specific attributes of the certificate. Default filtering returns only RSA_2048
certificates. For more information, see Filters.
Lists the tags that have been applied to the ACM certificate. Use the certificate's Amazon Resource Name (ARN) to specify the certificate. To add a tag to an ACM certificate, use the AddTagsToCertificate action. To delete a tag, use the RemoveTagsFromCertificate action.
", "PutAccountConfiguration": "Adds or modifies account-level configurations in ACM.
The supported configuration option is DaysBeforeExpiry
. This option specifies the number of days prior to certificate expiration when ACM starts generating EventBridge
events. ACM sends one event per day per certificate until the certificate expires. By default, accounts receive events starting 45 days before certificate expiration.
Remove one or more tags from an ACM certificate. A tag consists of a key-value pair. If you do not specify the value portion of the tag when calling this function, the tag will be removed regardless of value. If you specify a value, the tag is removed only if it is associated with the specified value.
To add tags to a certificate, use the AddTagsToCertificate action. To view all of the tags that have been applied to a specific ACM certificate, use the ListTagsForCertificate action.
", "RenewCertificate": "Renews an eligible ACM certificate. At this time, only exported private certificates can be renewed with this operation. In order to renew your ACM PCA certificates with ACM, you must first grant the ACM service principal permission to do so. For more information, see Testing Managed Renewal in the ACM User Guide.
", - "RequestCertificate": "Requests an ACM certificate for use with other AWS services. To request an ACM certificate, you must specify a fully qualified domain name (FQDN) in the DomainName
parameter. You can also specify additional FQDNs in the SubjectAlternativeNames
parameter.
If you are requesting a private certificate, domain validation is not required. If you are requesting a public certificate, each domain name that you specify must be validated to verify that you own or control the domain. You can use DNS validation or email validation. We recommend that you use DNS validation. ACM issues public certificates after receiving approval from the domain owner.
", + "RequestCertificate": "Requests an ACM certificate for use with other Amazon Web Services services. To request an ACM certificate, you must specify a fully qualified domain name (FQDN) in the DomainName
parameter. You can also specify additional FQDNs in the SubjectAlternativeNames
parameter.
If you are requesting a private certificate, domain validation is not required. If you are requesting a public certificate, each domain name that you specify must be validated to verify that you own or control the domain. You can use DNS validation or email validation. We recommend that you use DNS validation. ACM issues public certificates after receiving approval from the domain owner.
ACM behavior differs from the https://tools.ietf.org/html/rfc6125#appendix-B.2RFC 6125 specification of the certificate validation process. first checks for a subject alternative name, and, if it finds one, ignores the common name (CN)
Resends the email that requests domain ownership validation. The domain owner or an authorized representative must approve the ACM certificate before it can be issued. The certificate can be approved by clicking a link in the mail to navigate to the Amazon certificate approval website and then clicking I Approve. However, the validation email can be blocked by spam filters. Therefore, if you do not receive the original mail, you can request that the mail be resent within 72 hours of requesting the ACM certificate. If more than 72 hours have elapsed since your original request or since your last attempt to resend validation mail, you must request a new certificate. For more information about setting up your contact email addresses, see Configure Email for your Domain.
", "UpdateCertificateOptions": "Updates a certificate. Currently, you can use this function to specify whether to opt in to or out of recording your certificate in a certificate transparency log. For more information, see Opting Out of Certificate Transparency Logging.
" }, @@ -33,7 +33,7 @@ "base": null, "refs": { "AddTagsToCertificateRequest$CertificateArn": "String that contains the ARN of the ACM certificate to which the tag is to be applied. This must be of the form:
arn:aws:acm:region:123456789012:certificate/12345678-1234-1234-1234-123456789012
For more information about ARNs, see Amazon Resource Names (ARNs).
", - "CertificateDetail$CertificateArn": "The Amazon Resource Name (ARN) of the certificate. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference.
", + "CertificateDetail$CertificateArn": "The Amazon Resource Name (ARN) of the certificate. For more information about ARNs, see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference.
", "CertificateDetail$CertificateAuthorityArn": "The Amazon Resource Name (ARN) of the ACM PCA private certificate authority (CA) that issued the certificate. This has the following format:
arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012
Amazon Resource Name (ARN) of the certificate. This is of the form:
arn:aws:acm:region:123456789012:certificate/12345678-1234-1234-1234-123456789012
For more information about ARNs, see Amazon Resource Names (ARNs).
", "DeleteCertificateRequest$CertificateArn": "String that contains the ARN of the ACM certificate to be deleted. This must be of the form:
arn:aws:acm:region:123456789012:certificate/12345678-1234-1234-1234-123456789012
For more information about ARNs, see Amazon Resource Names (ARNs).
", @@ -45,7 +45,7 @@ "ListTagsForCertificateRequest$CertificateArn": "String that contains the ARN of the ACM certificate for which you want to list the tags. This must have the following form:
arn:aws:acm:region:123456789012:certificate/12345678-1234-1234-1234-123456789012
For more information about ARNs, see Amazon Resource Names (ARNs).
", "RemoveTagsFromCertificateRequest$CertificateArn": "String that contains the ARN of the ACM Certificate with one or more tags that you want to remove. This must be of the form:
arn:aws:acm:region:123456789012:certificate/12345678-1234-1234-1234-123456789012
For more information about ARNs, see Amazon Resource Names (ARNs).
", "RenewCertificateRequest$CertificateArn": "String that contains the ARN of the ACM certificate to be renewed. This must be of the form:
arn:aws:acm:region:123456789012:certificate/12345678-1234-1234-1234-123456789012
For more information about ARNs, see Amazon Resource Names (ARNs).
", - "RequestCertificateRequest$CertificateAuthorityArn": "The Amazon Resource Name (ARN) of the private certificate authority (CA) that will be used to issue the certificate. If you do not provide an ARN and you are trying to request a private certificate, ACM will attempt to issue a public certificate. For more information about private CAs, see the AWS Certificate Manager Private Certificate Authority (PCA) user guide. The ARN must have the following form:
arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012
The Amazon Resource Name (ARN) of the private certificate authority (CA) that will be used to issue the certificate. If you do not provide an ARN and you are trying to request a private certificate, ACM will attempt to issue a public certificate. For more information about private CAs, see the Amazon Web Services Certificate Manager Private Certificate Authority (PCA) user guide. The ARN must have the following form:
arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012
String that contains the ARN of the issued certificate. This must be of the form:
arn:aws:acm:us-east-1:123456789012:certificate/12345678-1234-1234-1234-123456789012
String that contains the ARN of the requested certificate. The certificate ARN is generated and returned by the RequestCertificate action as soon as the request is made. By default, using this parameter causes email to be sent to all top-level domains you specified in the certificate request. The ARN must be of the form:
arn:aws:acm:us-east-1:123456789012:certificate/12345678-1234-1234-1234-123456789012
ARN of the requested certificate to update. This must be of the form:
arn:aws:acm:us-east-1:account:certificate/12345678-1234-1234-1234-123456789012
The source of the certificate. For certificates provided by ACM, this value is AMAZON_ISSUED
. For certificates that you imported with ImportCertificate, this value is IMPORTED
. ACM does not provide managed renewal for imported certificates. For more information about the differences between certificates that you import and those that ACM provides, see Importing Certificates in the AWS Certificate Manager User Guide.
The source of the certificate. For certificates provided by ACM, this value is AMAZON_ISSUED
. For certificates that you imported with ImportCertificate, this value is IMPORTED
. ACM does not provide managed renewal for imported certificates. For more information about the differences between certificates that you import and those that ACM provides, see Importing Certificates in the Amazon Web Services Certificate Manager User Guide.
Object containing expiration events options associated with an AWS account.
", + "base": "Object containing expiration events options associated with an Amazon Web Services account.
", "refs": { - "GetAccountConfigurationResponse$ExpiryEvents": "Expiration events configuration options associated with the AWS account.
", + "GetAccountConfigurationResponse$ExpiryEvents": "Expiration events configuration options associated with the Amazon Web Services account.
", "PutAccountConfigurationRequest$ExpiryEvents": "Specifies expiration events associated with an account.
" } }, @@ -252,7 +252,7 @@ "FailureReason": { "base": null, "refs": { - "CertificateDetail$FailureReason": "The reason the certificate request failed. This value exists only when the certificate status is FAILED
. For more information, see Certificate Request Failed in the AWS Certificate Manager User Guide.
The reason the certificate request failed. This value exists only when the certificate status is FAILED
. For more information, see Certificate Request Failed in the Amazon Web Services Certificate Manager User Guide.
The reason that a renewal request was unsuccessful.
" } }, @@ -297,7 +297,7 @@ "InUseList": { "base": null, "refs": { - "CertificateDetail$InUseBy": "A list of ARNs for the AWS resources that are using the certificate. A certificate can be used by multiple AWS resources.
" + "CertificateDetail$InUseBy": "A list of ARNs for the Amazon Web Services resources that are using the certificate. A certificate can be used by multiple Amazon Web Services resources.
" } }, "InvalidArgsException": { @@ -490,7 +490,7 @@ } }, "ResourceInUseException": { - "base": "The certificate is in use by another AWS service in the caller's account. Remove the association and try again.
", + "base": "The certificate is in use by another Amazon Web Services service in the caller's account. Remove the association and try again.
", "refs": { } }, @@ -500,7 +500,7 @@ } }, "ResourceRecord": { - "base": "Contains a DNS record value that you can use to can use to validate ownership or control of a domain. This is used by the DescribeCertificate action.
", + "base": "Contains a DNS record value that you can use to validate ownership or control of a domain. This is used by the DescribeCertificate action.
", "refs": { "DomainValidation$ResourceRecord": "Contains the CNAME record that you add to your DNS database for domain validation. For more information, see Use DNS to Validate Domain Ownership.
Note: The CNAME information that you need does not include the name of your domain. If you include your domain name in the DNS database CNAME record, validation fails. For example, if the name is \"_a79865eb4cd1a6ab990a45779b4e0b96.yourdomain.com\", only \"_a79865eb4cd1a6ab990a45779b4e0b96\" must be used.
" } @@ -611,7 +611,7 @@ } }, "ValidationException": { - "base": "The supplied input failed to satisfy constraints of an AWS service.
", + "base": "The supplied input failed to satisfy constraints of an Amazon Web Services service.
", "refs": { } }, diff --git a/models/apis/dms/2016-01-01/api-2.json b/models/apis/dms/2016-01-01/api-2.json index 93ac6acecfe..bc7e38ac9d3 100644 --- a/models/apis/dms/2016-01-01/api-2.json +++ b/models/apis/dms/2016-01-01/api-2.json @@ -1654,7 +1654,8 @@ "Units":{"shape":"String"}, "Applicability":{"shape":"String"}, "IntValueMin":{"shape":"IntegerOptional"}, - "IntValueMax":{"shape":"IntegerOptional"} + "IntValueMax":{"shape":"IntegerOptional"}, + "DefaultValue":{"shape":"String"} } }, "EndpointSettingEnumValues":{ @@ -1891,7 +1892,8 @@ "SslClientKeyPassword":{"shape":"SecretString"}, "SslCaCertificateArn":{"shape":"String"}, "SaslUsername":{"shape":"String"}, - "SaslPassword":{"shape":"SecretString"} + "SaslPassword":{"shape":"SecretString"}, + "NoHexPrefix":{"shape":"BooleanOptional"} } }, "KeyList":{ @@ -1909,7 +1911,8 @@ "PartitionIncludeSchemaTable":{"shape":"BooleanOptional"}, "IncludeTableAlterOperations":{"shape":"BooleanOptional"}, "IncludeControlDetails":{"shape":"BooleanOptional"}, - "IncludeNullAndEmpty":{"shape":"BooleanOptional"} + "IncludeNullAndEmpty":{"shape":"BooleanOptional"}, + "NoHexPrefix":{"shape":"BooleanOptional"} } }, "ListTagsForResourceMessage":{ @@ -1993,7 +1996,8 @@ "SybaseSettings":{"shape":"SybaseSettings"}, "MicrosoftSQLServerSettings":{"shape":"MicrosoftSQLServerSettings"}, "IBMDb2Settings":{"shape":"IBMDb2Settings"}, - "DocDbSettings":{"shape":"DocDbSettings"} + "DocDbSettings":{"shape":"DocDbSettings"}, + "ExactSettings":{"shape":"BooleanOptional"} } }, "ModifyEndpointResponse":{ @@ -2192,7 +2196,11 @@ "SecurityDbEncryptionName":{"shape":"String"}, "ServerName":{"shape":"String"}, "SpatialDataOptionToGeoJsonFunctionName":{"shape":"String"}, + "StandbyDelayTime":{"shape":"IntegerOptional"}, "Username":{"shape":"String"}, + "UseBFile":{"shape":"BooleanOptional"}, + "UseDirectPathFullLoad":{"shape":"BooleanOptional"}, + "UseLogminerReader":{"shape":"BooleanOptional"}, "SecretsManagerAccessRoleArn":{"shape":"String"}, 
"SecretsManagerSecretId":{"shape":"String"}, "SecretsManagerOracleAsmAccessRoleArn":{"shape":"String"}, @@ -2243,6 +2251,14 @@ "type":"list", "member":{"shape":"ResourcePendingMaintenanceActions"} }, + "PluginNameValue":{ + "type":"string", + "enum":[ + "no-preference", + "test-decoding", + "pglogical" + ] + }, "PostgreSQLSettings":{ "type":"structure", "members":{ @@ -2253,11 +2269,15 @@ "DdlArtifactsSchema":{"shape":"String"}, "ExecuteTimeout":{"shape":"IntegerOptional"}, "FailTasksOnLobTruncation":{"shape":"BooleanOptional"}, + "HeartbeatEnable":{"shape":"BooleanOptional"}, + "HeartbeatSchema":{"shape":"String"}, + "HeartbeatFrequency":{"shape":"IntegerOptional"}, "Password":{"shape":"SecretString"}, "Port":{"shape":"IntegerOptional"}, "ServerName":{"shape":"String"}, "Username":{"shape":"String"}, "SlotName":{"shape":"String"}, + "PluginName":{"shape":"PluginNameValue"}, "SecretsManagerAccessRoleArn":{"shape":"String"}, "SecretsManagerSecretId":{"shape":"String"} } diff --git a/models/apis/dms/2016-01-01/docs-2.json b/models/apis/dms/2016-01-01/docs-2.json index 45daff84ae9..55f4056dae5 100644 --- a/models/apis/dms/2016-01-01/docs-2.json +++ b/models/apis/dms/2016-01-01/docs-2.json @@ -1,58 +1,58 @@ { "version": "2.0", - "service": "AWS Database Migration Service (AWS DMS) can migrate your data to and from the most widely used commercial and open-source databases such as Oracle, PostgreSQL, Microsoft SQL Server, Amazon Redshift, MariaDB, Amazon Aurora, MySQL, and SAP Adaptive Server Enterprise (ASE). The service supports homogeneous migrations such as Oracle to Oracle, as well as heterogeneous migrations between different database platforms, such as Oracle to MySQL or SQL Server to PostgreSQL.
For more information about AWS DMS, see What Is AWS Database Migration Service? in the AWS Database Migration User Guide.
", + "service": "Database Migration Service (DMS) can migrate your data to and from the most widely used commercial and open-source databases such as Oracle, PostgreSQL, Microsoft SQL Server, Amazon Redshift, MariaDB, Amazon Aurora, MySQL, and SAP Adaptive Server Enterprise (ASE). The service supports homogeneous migrations such as Oracle to Oracle, as well as heterogeneous migrations between different database platforms, such as Oracle to MySQL or SQL Server to PostgreSQL.
For more information about DMS, see What Is Database Migration Service? in the Database Migration Service User Guide.
", "operations": { - "AddTagsToResource": "Adds metadata tags to an AWS DMS resource, including replication instance, endpoint, security group, and migration task. These tags can also be used with cost allocation reporting to track cost associated with DMS resources, or used in a Condition statement in an IAM policy for DMS. For more information, see Tag
data type description.
Adds metadata tags to an DMS resource, including replication instance, endpoint, security group, and migration task. These tags can also be used with cost allocation reporting to track cost associated with DMS resources, or used in a Condition statement in an IAM policy for DMS. For more information, see Tag
data type description.
Applies a pending maintenance action to a resource (for example, to a replication instance).
", "CancelReplicationTaskAssessmentRun": "Cancels a single premigration assessment run.
This operation prevents any individual assessments from running if they haven't started running. It also attempts to cancel any individual assessments that are currently running.
", - "CreateEndpoint": "Creates an endpoint using the provided settings.
", - "CreateEventSubscription": "Creates an AWS DMS event notification subscription.
You can specify the type of source (SourceType
) you want to be notified of, provide a list of AWS DMS source IDs (SourceIds
) that triggers the events, and provide a list of event categories (EventCategories
) for events you want to be notified of. If you specify both the SourceType
and SourceIds
, such as SourceType = replication-instance
and SourceIdentifier = my-replinstance
, you will be notified of all the replication instance events for the specified source. If you specify a SourceType
but don't specify a SourceIdentifier
, you receive notice of the events for that source type for all your AWS DMS sources. If you don't specify either SourceType
nor SourceIdentifier
, you will be notified of events generated from all AWS DMS sources belonging to your customer account.
For more information about AWS DMS events, see Working with Events and Notifications in the AWS Database Migration Service User Guide.
", - "CreateReplicationInstance": "Creates the replication instance using the specified parameters.
AWS DMS requires that your account have certain roles with appropriate permissions before you can create a replication instance. For information on the required roles, see Creating the IAM Roles to Use With the AWS CLI and AWS DMS API. For information on the required permissions, see IAM Permissions Needed to Use AWS DMS.
", + "CreateEndpoint": "Creates an endpoint using the provided settings.
For a MySQL source or target endpoint, don't explicitly specify the database using the DatabaseName
request parameter on the CreateEndpoint
API call. Specifying DatabaseName
when you create a MySQL endpoint replicates all the task tables to this single database. For MySQL endpoints, you specify the database only when you specify the schema in the table-mapping rules of the DMS task.
Creates an DMS event notification subscription.
You can specify the type of source (SourceType
) you want to be notified of, provide a list of DMS source IDs (SourceIds
) that triggers the events, and provide a list of event categories (EventCategories
) for events you want to be notified of. If you specify both the SourceType
and SourceIds
, such as SourceType = replication-instance
and SourceIdentifier = my-replinstance
, you will be notified of all the replication instance events for the specified source. If you specify a SourceType
but don't specify a SourceIdentifier
, you receive notice of the events for that source type for all your DMS sources. If you don't specify either SourceType
nor SourceIdentifier
, you will be notified of events generated from all DMS sources belonging to your customer account.
For more information about DMS events, see Working with Events and Notifications in the Database Migration Service User Guide.
", + "CreateReplicationInstance": "Creates the replication instance using the specified parameters.
DMS requires that your account have certain roles with appropriate permissions before you can create a replication instance. For information on the required roles, see Creating the IAM Roles to Use With the CLI and DMS API. For information on the required permissions, see IAM Permissions Needed to Use DMS.
", "CreateReplicationSubnetGroup": "Creates a replication subnet group given a list of the subnet IDs in a VPC.
", "CreateReplicationTask": "Creates a replication task using the specified parameters.
", "DeleteCertificate": "Deletes the specified certificate.
", "DeleteConnection": "Deletes the connection between a replication instance and an endpoint.
", "DeleteEndpoint": "Deletes the specified endpoint.
All tasks associated with the endpoint must be deleted before you can delete the endpoint.
Deletes an AWS DMS event subscription.
", + "DeleteEventSubscription": "Deletes an DMS event subscription.
", "DeleteReplicationInstance": "Deletes the specified replication instance.
You must delete any migration tasks that are associated with the replication instance before you can delete it.
Deletes a subnet group.
", "DeleteReplicationTask": "Deletes the specified replication task.
", - "DeleteReplicationTaskAssessmentRun": "Deletes the record of a single premigration assessment run.
This operation removes all metadata that AWS DMS maintains about this assessment run. However, the operation leaves untouched all information about this assessment run that is stored in your Amazon S3 bucket.
", - "DescribeAccountAttributes": "Lists all of the AWS DMS attributes for a customer account. These attributes include AWS DMS quotas for the account and a unique account identifier in a particular DMS region. DMS quotas include a list of resource quotas supported by the account, such as the number of replication instances allowed. The description for each resource quota, includes the quota name, current usage toward that quota, and the quota's maximum value. DMS uses the unique account identifier to name each artifact used by DMS in the given region.
This command does not take any parameters.
", + "DeleteReplicationTaskAssessmentRun": "Deletes the record of a single premigration assessment run.
This operation removes all metadata that DMS maintains about this assessment run. However, the operation leaves untouched all information about this assessment run that is stored in your Amazon S3 bucket.
", + "DescribeAccountAttributes": "Lists all of the DMS attributes for a customer account. These attributes include DMS quotas for the account and a unique account identifier in a particular DMS region. DMS quotas include a list of resource quotas supported by the account, such as the number of replication instances allowed. The description for each resource quota, includes the quota name, current usage toward that quota, and the quota's maximum value. DMS uses the unique account identifier to name each artifact used by DMS in the given region.
This command does not take any parameters.
", "DescribeApplicableIndividualAssessments": "Provides a list of individual assessments that you can specify for a new premigration assessment run, given one or more parameters.
If you specify an existing migration task, this operation provides the default individual assessments you can specify for that task. Otherwise, the specified parameters model elements of a possible migration task on which to base a premigration assessment run.
To use these migration task modeling parameters, you must specify an existing replication instance, a source database engine, a target database engine, and a migration type. This combination of parameters potentially limits the default individual assessments available for an assessment run created for a corresponding migration task.
If you specify no parameters, this operation provides a list of all possible individual assessments that you can specify for an assessment run. If you specify any one of the task modeling parameters, you must specify all of them or the operation cannot provide a list of individual assessments. The only parameter that you can specify alone is for an existing migration task. The specified task definition then determines the default list of individual assessments that you can specify in an assessment run for the task.
", "DescribeCertificates": "Provides a description of the certificate.
", "DescribeConnections": "Describes the status of the connections that have been made between the replication instance and an endpoint. Connections are created when you test an endpoint.
", "DescribeEndpointSettings": "Returns information about the possible endpoint settings available when you create an endpoint for a specific database engine.
", "DescribeEndpointTypes": "Returns information about the type of endpoints available.
", "DescribeEndpoints": "Returns information about the endpoints for your account in the current region.
", - "DescribeEventCategories": "Lists categories for all event source types, or, if specified, for a specified source type. You can see a list of the event categories and source types in Working with Events and Notifications in the AWS Database Migration Service User Guide.
", + "DescribeEventCategories": "Lists categories for all event source types, or, if specified, for a specified source type. You can see a list of the event categories and source types in Working with Events and Notifications in the Database Migration Service User Guide.
", "DescribeEventSubscriptions": "Lists all the event subscriptions for a customer account. The description of a subscription includes SubscriptionName
, SNSTopicARN
, CustomerID
, SourceType
, SourceID
, CreationTime
, and Status
.
If you specify SubscriptionName
, this action lists the description for that subscription.
Lists events for a given source identifier and source type. You can also specify a start and end time. For more information on AWS DMS events, see Working with Events and Notifications in the AWS Database Migration User Guide.
", + "DescribeEvents": "Lists events for a given source identifier and source type. You can also specify a start and end time. For more information on DMS events, see Working with Events and Notifications in the Database Migration Service User Guide.
", "DescribeOrderableReplicationInstances": "Returns information about the replication instance types that can be created in the specified region.
", "DescribePendingMaintenanceActions": "For internal use only
", "DescribeRefreshSchemasStatus": "Returns the status of the RefreshSchemas operation.
", "DescribeReplicationInstanceTaskLogs": "Returns information about the task logs for the specified task.
", "DescribeReplicationInstances": "Returns information about replication instances for your account in the current region.
", "DescribeReplicationSubnetGroups": "Returns information about the replication subnet groups.
", - "DescribeReplicationTaskAssessmentResults": "Returns the task assessment results from Amazon S3. This action always returns the latest results.
", + "DescribeReplicationTaskAssessmentResults": "Returns the task assessment results from the Amazon S3 bucket that DMS creates in your account. This action always returns the latest results.
For more information about DMS task assessments, see Creating a task assessment report in the Database Migration Service User Guide.
", "DescribeReplicationTaskAssessmentRuns": "Returns a paginated list of premigration assessment runs based on filter settings.
These filter settings can specify a combination of premigration assessment runs, migration tasks, replication instances, and assessment run status values.
This operation doesn't return information about individual assessments. For this information, see the DescribeReplicationTaskIndividualAssessments
operation.
Returns a paginated list of individual assessments based on filter settings.
These filter settings can specify a combination of premigration assessment runs, migration tasks, and assessment status values.
", "DescribeReplicationTasks": "Returns information about replication tasks for your account in the current region.
", "DescribeSchemas": "Returns information about the schema for the specified endpoint.
", - "DescribeTableStatistics": "Returns table statistics on the database migration task, including table name, rows inserted, rows updated, and rows deleted.
Note that the \"last updated\" column the DMS console only indicates the time that AWS DMS last updated the table statistics record for a table. It does not indicate the time of the last update to the table.
", + "DescribeTableStatistics": "Returns table statistics on the database migration task, including table name, rows inserted, rows updated, and rows deleted.
Note that the \"last updated\" column the DMS console only indicates the time that DMS last updated the table statistics record for a table. It does not indicate the time of the last update to the table.
", "ImportCertificate": "Uploads the specified certificate.
", - "ListTagsForResource": "Lists all metadata tags attached to an AWS DMS resource, including replication instance, endpoint, security group, and migration task. For more information, see Tag
data type description.
Modifies the specified endpoint.
", - "ModifyEventSubscription": "Modifies an existing AWS DMS event notification subscription.
", + "ListTagsForResource": "Lists all metadata tags attached to an DMS resource, including replication instance, endpoint, security group, and migration task. For more information, see Tag
data type description.
Modifies the specified endpoint.
For a MySQL source or target endpoint, don't explicitly specify the database using the DatabaseName
request parameter on the ModifyEndpoint
API call. Specifying DatabaseName
when you modify a MySQL endpoint replicates all the task tables to this single database. For MySQL endpoints, you specify the database only when you specify the schema in the table-mapping rules of the DMS task.
Modifies an existing DMS event notification subscription.
", "ModifyReplicationInstance": "Modifies the replication instance to apply new settings. You can change one or more parameters by specifying these parameters and the new values in the request.
Some settings are applied during the maintenance window.
", "ModifyReplicationSubnetGroup": "Modifies the settings for the specified replication subnet group.
", - "ModifyReplicationTask": "Modifies the specified replication task.
You can't modify the task endpoints. The task must be stopped before you can modify it.
For more information about AWS DMS tasks, see Working with Migration Tasks in the AWS Database Migration Service User Guide.
", - "MoveReplicationTask": "Moves a replication task from its current replication instance to a different target replication instance using the specified parameters. The target replication instance must be created with the same or later AWS DMS version as the current replication instance.
", + "ModifyReplicationTask": "Modifies the specified replication task.
You can't modify the task endpoints. The task must be stopped before you can modify it.
For more information about DMS tasks, see Working with Migration Tasks in the Database Migration Service User Guide.
", + "MoveReplicationTask": "Moves a replication task from its current replication instance to a different target replication instance using the specified parameters. The target replication instance must be created with the same or later DMS version as the current replication instance.
", "RebootReplicationInstance": "Reboots a replication instance. Rebooting results in a momentary outage, until the replication instance becomes available again.
", "RefreshSchemas": "Populates the schema for the specified endpoint. This is an asynchronous operation and can take several minutes. You can check the status of this operation by calling the DescribeRefreshSchemasStatus operation.
", "ReloadTables": "Reloads the target database table with the source data.
", - "RemoveTagsFromResource": "Removes metadata tags from an AWS DMS resource, including replication instance, endpoint, security group, and migration task. For more information, see Tag
data type description.
Starts the replication task.
For more information about AWS DMS tasks, see Working with Migration Tasks in the AWS Database Migration Service User Guide.
", + "RemoveTagsFromResource": "Removes metadata tags from an DMS resource, including replication instance, endpoint, security group, and migration task. For more information, see Tag
data type description.
Starts the replication task.
For more information about DMS tasks, see Working with Migration Tasks in the Database Migration Service User Guide.
", "StartReplicationTaskAssessment": "Starts the replication task assessment for unsupported data types in the source database.
", "StartReplicationTaskAssessmentRun": "Starts a new premigration assessment run for one or more individual assessments of a migration task.
The assessments that you can specify depend on the source and target database engine and the migration type defined for the given task. To run this operation, your migration task must already be created. After you run this operation, you can review the status of each individual assessment. You can also run the migration task manually after the assessment run and its individual assessments complete.
", "StopReplicationTask": "Stops the replication task.
", @@ -60,12 +60,12 @@ }, "shapes": { "AccessDeniedFault": { - "base": "AWS DMS was denied access to the endpoint. Check that the role is correctly configured.
", + "base": "DMS was denied access to the endpoint. Check that the role is correctly configured.
", "refs": { } }, "AccountQuota": { - "base": "Describes a quota for an AWS account, for example, the number of replication instances allowed.
", + "base": "Describes a quota for an account, for example the number of replication instances allowed.
", "refs": { "AccountQuotaList$member": null } @@ -77,7 +77,7 @@ } }, "AddTagsToResourceMessage": { - "base": "Associates a set of tags with an AWS DMS resource.
", + "base": "Associates a set of tags with an DMS resource.
", "refs": { } }, @@ -109,7 +109,7 @@ } }, "AvailabilityZone": { - "base": "The name of an Availability Zone for use during database migration. AvailabilityZone
is an optional parameter to the CreateReplicationInstance
operation, and it’s value relates to the AWS Region of an endpoint. For example, the availability zone of an endpoint in the us-east-1 region might be us-east-1a, us-east-1b, us-east-1c, or us-east-1d.
The name of an Availability Zone for use during database migration. AvailabilityZone
is an optional parameter to the CreateReplicationInstance
operation, and it’s value relates to the Region of an endpoint. For example, the availability zone of an endpoint in the us-east-1 region might be us-east-1a, us-east-1b, us-east-1c, or us-east-1d.
The Availability Zone of the subnet.
" } @@ -129,7 +129,7 @@ "ReplicationInstance$MultiAZ": " Specifies whether the replication instance is a Multi-AZ deployment. You can't set the AvailabilityZone
parameter if the Multi-AZ parameter is set to true
.
Boolean value indicating if minor version upgrades will be automatically applied to the instance.
", "ReplicationInstance$PubliclyAccessible": " Specifies the accessibility options for the replication instance. A value of true
represents an instance with a public IP address. A value of false
represents an instance with a private IP address. The default value is true
.
Indicates if Change Data Capture (CDC) is supported.
" + "SupportedEndpointType$SupportsCDC": "Indicates if change data capture (CDC) is supported.
" } }, "BooleanOptional": { @@ -144,26 +144,29 @@ "EndpointSetting$Sensitive": "A value that marks this endpoint setting as sensitive.
", "IBMDb2Settings$SetDataCaptureChanges": "Enables ongoing replication (CDC) as a BOOLEAN value. The default is true.
", "KafkaSettings$IncludeTransactionDetails": "Provides detailed transaction information from the source database. This information includes a commit timestamp, a log position, and values for transaction_id
, previous transaction_id
, and transaction_record_id
(the record offset within a transaction). The default is false
.
Shows the partition value within the Kafka message output, unless the partition type is schema-table-type
. The default is false
.
Shows the partition value within the Kafka message output unless the partition type is schema-table-type
. The default is false
.
Prefixes schema and table names to partition values, when the partition type is primary-key-type
. Doing this increases data distribution among Kafka partitions. For example, suppose that a SysBench schema has thousands of tables and each table has only limited range for a primary key. In this case, the same primary key is sent from thousands of tables to the same partition, which causes throttling. The default is false
.
Includes any data definition language (DDL) operations that change the table in the control data, such as rename-table
, drop-table
, add-column
, drop-column
, and rename-column
. The default is false
.
Shows detailed control information for table definition, column definition, and table and column changes in the Kafka message output. The default is false
.
Include NULL and empty columns for records migrated to the endpoint. The default is false
.
If this attribute is Y, it allows hexadecimal values that don't have the 0x
prefix when migrated to a Kafka target. If this attribute is N, all hexadecimal values include this prefix when migrated to Kafka.
Provides detailed transaction information from the source database. This information includes a commit timestamp, a log position, and values for transaction_id
, previous transaction_id
, and transaction_record_id
(the record offset within a transaction). The default is false
.
Shows the partition value within the Kinesis message output, unless the partition type is schema-table-type
. The default is false
.
Prefixes schema and table names to partition values, when the partition type is primary-key-type
. Doing this increases data distribution among Kinesis shards. For example, suppose that a SysBench schema has thousands of tables and each table has only limited range for a primary key. In this case, the same primary key is sent from thousands of tables to the same shard, which causes throttling. The default is false
.
Includes any data definition language (DDL) operations that change the table in the control data, such as rename-table
, drop-table
, add-column
, drop-column
, and rename-column
. The default is false
.
Shows detailed control information for table definition, column definition, and table and column changes in the Kinesis message output. The default is false
.
Include NULL and empty columns for records migrated to the endpoint. The default is false
.
If this attribute is Y, it allows hexadecimal values that don't have the 0x
prefix when migrated to a Kinesis target. If this attribute is N, all hexadecimal values include this prefix when migrated to Kinesis.
Cleans and recreates table metadata information on the replication instance when a mismatch occurs. An example is a situation where running an alter DDL statement on a table might result in different information about the table cached in the replication instance.
", - "MicrosoftSQLServerSettings$ReadBackupOnly": "When this attribute is set to Y
, AWS DMS only reads changes from transaction log backups and doesn't read from the active transaction log file during ongoing replication. Setting this parameter to Y
enables you to control active transaction log file growth during full load and ongoing replication tasks. However, it can add some source latency to ongoing replication.
When this attribute is set to Y
, DMS only reads changes from transaction log backups and doesn't read from the active transaction log file during ongoing replication. Setting this parameter to Y
enables you to control active transaction log file growth during full load and ongoing replication tasks. However, it can add some source latency to ongoing replication.
Use this to attribute to transfer data for full-load operations using BCP. When the target table contains an identity column that does not exist in the source table, you must disable the use BCP for loading table option.
", "MicrosoftSQLServerSettings$UseThirdPartyBackupDevice": "When this attribute is set to Y
, DMS processes third-party transaction log backups if they are created in native format.
If this attribute is Y, the current call to ModifyEndpoint
replaces all existing endpoint settings with the exact settings that you specify in this call. If this attribute is N, the current call to ModifyEndpoint
does two things:
It replaces any endpoint settings that already exist with new values, for settings with the same names.
It creates new endpoint settings that you specify in the call, for settings with different names.
For example, if you call create-endpoint ... --endpoint-settings '{\"a\":1}' ...
, the endpoint has the following endpoint settings: '{\"a\":1}'
. If you then call modify-endpoint ... --endpoint-settings '{\"b\":2}' ...
for the same endpoint, the endpoint has the following settings: '{\"a\":1,\"b\":2}'
.
However, suppose that you follow this with a call to modify-endpoint ... --endpoint-settings '{\"b\":2}' --exact-settings ...
for that same endpoint again. Then the endpoint has the following settings: '{\"b\":2}'
. All existing settings are replaced with the exact settings that you specify.
A Boolean value; set to true to activate the subscription.
", "ModifyReplicationInstanceMessage$MultiAZ": " Specifies whether the replication instance is a Multi-AZ deployment. You can't set the AvailabilityZone
parameter if the Multi-AZ parameter is set to true
.
A value that indicates that minor version upgrades are applied automatically to the replication instance during the maintenance window. Changing this parameter doesn't result in an outage, except in the case described following. The change is asynchronously applied as soon as possible.
An outage does result if these factors apply:
This parameter is set to true
during the maintenance window.
A newer minor version is available.
AWS DMS has enabled automatic patching for the given engine version.
A value that indicates that minor version upgrades are applied automatically to the replication instance during the maintenance window. Changing this parameter doesn't result in an outage, except in the case described following. The change is asynchronously applied as soon as possible.
An outage does result if these factors apply:
This parameter is set to true
during the maintenance window.
A newer minor version is available.
DMS has enabled automatic patching for the given engine version.
Adjusts the behavior of DMS when migrating from an SQL Server source database that is hosted as part of an Always On availability group cluster. If you need DMS to poll all the nodes in the Always On cluster for transaction backups, set this attribute to false
.
If you want AWS Identity and Access Management (IAM) authorization enabled for this endpoint, set this parameter to true
. Then attach the appropriate IAM policy document to your service role specified by ServiceAccessRoleArn
. The default is false
.
If you want Identity and Access Management (IAM) authorization enabled for this endpoint, set this parameter to true
. Then attach the appropriate IAM policy document to your service role specified by ServiceAccessRoleArn
. The default is false
.
Set this attribute to set up table-level supplemental logging for the Oracle database. This attribute enables PRIMARY KEY supplemental logging on all tables selected for a migration task.
If you use this option, you still need to enable database-level supplemental logging.
", "OracleSettings$AllowSelectNestedTables": "Set this attribute to true
to enable replication of Oracle tables containing columns that are nested tables or defined types.
Set this attribute to false
in order to use the Binary Reader to capture change data for an Amazon RDS for Oracle as the source. This tells the DMS instance to not access redo logs through any specified path prefix replacement using direct file access.
Set this attribute to true in order to use the Binary Reader to capture change data for an Amazon RDS for Oracle as the source. This setting tells DMS instance to replace the default Oracle root with the specified usePathPrefix
setting to access the redo logs.
Set this attribute to enable homogenous tablespace replication and create existing tables or indexes under the same tablespace on the target.
", "OracleSettings$DirectPathNoLog": "When set to true
, this attribute helps to increase the commit rate on the Oracle target database by writing directly to tables and not writing a trail to database logs.
When this field is set to Y
, AWS DMS only accesses the archived redo logs. If the archived redo logs are stored on Oracle ASM only, the AWS DMS user account needs to be granted ASM privileges.
When set to true
, this attribute specifies a parallel load when useDirectPathFullLoad
is set to Y
. This attribute also only applies when you use the AWS DMS parallel load feature. Note that the target table cannot have any constraints or indexes.
When this field is set to Y
, DMS only accesses the archived redo logs. If the archived redo logs are stored on Oracle ASM only, the DMS user account needs to be granted ASM privileges.
When set to true
, this attribute specifies a parallel load when useDirectPathFullLoad
is set to Y
. This attribute also only applies when you use the DMS parallel load feature. Note that the target table cannot have any constraints or indexes.
When set to true
, this attribute causes a task to fail if the actual size of an LOB column is greater than the specified LobMaxSize
.
If a task is set to limited LOB mode and this option is set to true
, the task fails instead of truncating the LOB data.
When set to true
, this attribute supports tablespace replication.
To capture DDL events, AWS DMS creates various artifacts in the PostgreSQL database when the task starts. You can later remove these artifacts.
If this value is set to N
, you don't have to create tables or triggers on the source database.
Set this attribute to Y to capture change data using the Binary Reader utility. Set UseLogminerReader
to N to set this attribute to Y. To use Binary Reader with Amazon RDS for Oracle as the source, you set additional attributes. For more information about using this setting with Oracle Automatic Storage Management (ASM), see Using Oracle LogMiner or DMS Binary Reader for CDC.
Set this attribute to Y to have DMS use a direct path full load. Specify this value to use the direct path protocol in the Oracle Call Interface (OCI). By using this OCI protocol, you can bulk-load Oracle target tables during a full load.
", + "OracleSettings$UseLogminerReader": "Set this attribute to Y to capture change data using the Oracle LogMiner utility (the default). Set this attribute to N if you want to access the redo logs as a binary file. When you set UseLogminerReader
to N, also set UseBfile
to Y. For more information on this setting and using Oracle ASM, see Using Oracle LogMiner or DMS Binary Reader for CDC in the DMS User Guide.
To capture DDL events, DMS creates various artifacts in the PostgreSQL database when the task starts. You can later remove these artifacts.
If this value is set to N
, you don't have to create tables or triggers on the source database.
When set to true
, this value causes a task to fail if the actual size of a LOB column is greater than the specified LobMaxSize
.
If a task is set to Limited LOB mode and this option is set to true, the task fails instead of truncating the LOB data.
", + "PostgreSQLSettings$HeartbeatEnable": "If this attribute is set to true, the write-ahead log (WAL) heartbeat keeps restart_lsn
moving and prevents storage full scenarios. The WAL heartbeat mimics a dummy transaction, so that idle logical replication slots don't hold onto old WAL logs that result in storage full situations on the source.
If this parameter is true
, the reboot is conducted through a Multi-AZ failover. (If the instance isn't configured for Multi-AZ, then you can't specify true
.)
A value that indicates to allow any date format, including invalid formats such as 00/00/00 00:00:00, to be loaded without generating an error. You can choose true
or false
(the default).
This parameter applies only to TIMESTAMP and DATE columns. Always use ACCEPTANYDATE with the DATEFORMAT parameter. If the date format for the data doesn't match the DATEFORMAT specification, Amazon Redshift inserts a NULL value into that field.
", "RedshiftSettings$CaseSensitiveNames": "If Amazon Redshift is configured to support case sensitive schema names, set CaseSensitiveNames
to true
. The default is false
.
If you set CompUpdate
to true
Amazon Redshift applies automatic compression if the table is empty. This applies even if the table columns already have encodings other than RAW
. If you set CompUpdate
to false
, automatic compression is disabled and existing column encodings aren't changed. The default is true
.
A value that specifies whether AWS DMS should migrate empty CHAR and VARCHAR fields as NULL. A value of true
sets empty CHAR and VARCHAR fields to null. The default is false
.
A value that specifies whether DMS should migrate empty CHAR and VARCHAR fields as NULL. A value of true
sets empty CHAR and VARCHAR fields to null. The default is false
.
This setting is only valid for a full-load migration task. Set ExplicitIds
to true
to have tables with IDENTITY
columns override their auto-generated values with explicit values loaded from the source data files used to populate the tables. The default is false
.
A value that specifies to remove surrounding quotation marks from strings in the incoming data. All characters within the quotation marks, including delimiters, are retained. Choose true
to remove quotation marks. The default is false
.
A value that specifies to remove the trailing white space characters from a VARCHAR string. This parameter applies only to columns with a VARCHAR data type. Choose true
to remove unneeded white space. The default is false
.
A value that specifies to truncate data in columns to the appropriate number of characters, so that the data fits in the column. This parameter applies only to columns with a VARCHAR or CHAR data type, and rows with a size of 4 MB or less. Choose true
to truncate data. The default is false
.
Specifies whether the replication instance is a Multi-AZ deployment. You can't set the AvailabilityZone
parameter if the Multi-AZ parameter is set to true
.
A value that enables statistics for Parquet pages and row groups. Choose true
to enable statistics, false
to disable. Statistics include NULL
, DISTINCT
, MAX
, and MIN
values. This parameter defaults to true
. This value is used for .parquet file format only.
A value that enables a full load to write INSERT operations to the comma-separated value (.csv) output files only to indicate how the rows were added to the source database.
AWS DMS supports the IncludeOpForFullLoad
parameter in versions 3.1.4 and later.
For full load, records can only be inserted. By default (the false
setting), no information is recorded in these output files for a full load to indicate that the rows were inserted at the source database. If IncludeOpForFullLoad
is set to true
or y
, the INSERT is recorded as an I annotation in the first field of the .csv file. This allows the format of your target records from a full load to be consistent with the target records from a CDC load.
This setting works together with the CdcInsertsOnly
and the CdcInsertsAndUpdates
parameters for output to .csv files only. For more information about how these settings work together, see Indicating Source DB Operations in Migrated S3 Data in the AWS Database Migration Service User Guide.
A value that enables a change data capture (CDC) load to write only INSERT operations to .csv or columnar storage (.parquet) output files. By default (the false
setting), the first field in a .csv or .parquet record contains the letter I (INSERT), U (UPDATE), or D (DELETE). These values indicate whether the row was inserted, updated, or deleted at the source database for a CDC load to the target.
If CdcInsertsOnly
is set to true
or y
, only INSERTs from the source database are migrated to the .csv or .parquet file. For .csv format only, how these INSERTs are recorded depends on the value of IncludeOpForFullLoad
. If IncludeOpForFullLoad
is set to true
, the first field of every CDC record is set to I to indicate the INSERT operation at the source. If IncludeOpForFullLoad
is set to false
, every CDC record is written without a first field to indicate the INSERT operation at the source. For more information about how these settings work together, see Indicating Source DB Operations in Migrated S3 Data in the AWS Database Migration Service User Guide.
AWS DMS supports the interaction described preceding between the CdcInsertsOnly
and IncludeOpForFullLoad
parameters in versions 3.1.4 and later.
CdcInsertsOnly
and CdcInsertsAndUpdates
can't both be set to true
for the same endpoint. Set either CdcInsertsOnly
or CdcInsertsAndUpdates
to true
for the same endpoint, but not both.
A value that specifies the precision of any TIMESTAMP
column values that are written to an Amazon S3 object file in .parquet format.
AWS DMS supports the ParquetTimestampInMillisecond
parameter in versions 3.1.4 and later.
When ParquetTimestampInMillisecond
is set to true
or y
, AWS DMS writes all TIMESTAMP
columns in a .parquet formatted file with millisecond precision. Otherwise, DMS writes them with microsecond precision.
Currently, Amazon Athena and AWS Glue can handle only millisecond precision for TIMESTAMP
values. Set this parameter to true
for S3 endpoint object files that are .parquet formatted only if you plan to query or process the data with Athena or AWS Glue.
AWS DMS writes any TIMESTAMP
column values written to an S3 file in .csv format with microsecond precision.
Setting ParquetTimestampInMillisecond
has no effect on the string format of the timestamp column value that is inserted by setting the TimestampColumnName
parameter.
A value that enables a change data capture (CDC) load to write INSERT and UPDATE operations to .csv or .parquet (columnar storage) output files. The default setting is false
, but when CdcInsertsAndUpdates
is set to true
or y
, only INSERTs and UPDATEs from the source database are migrated to the .csv or .parquet file.
For .csv file format only, how these INSERTs and UPDATEs are recorded depends on the value of the IncludeOpForFullLoad
parameter. If IncludeOpForFullLoad
is set to true
, the first field of every CDC record is set to either I
or U
to indicate INSERT and UPDATE operations at the source. But if IncludeOpForFullLoad
is set to false
, CDC records are written without an indication of INSERT or UPDATE operations at the source. For more information about how these settings work together, see Indicating Source DB Operations in Migrated S3 Data in the AWS Database Migration Service User Guide.
AWS DMS supports the use of the CdcInsertsAndUpdates
parameter in versions 3.3.1 and later.
CdcInsertsOnly
and CdcInsertsAndUpdates
can't both be set to true
for the same endpoint. Set either CdcInsertsOnly
or CdcInsertsAndUpdates
to true
for the same endpoint, but not both.
When set to true
, this parameter partitions S3 bucket folders based on transaction commit dates. The default value is false
. For more information about date-based folder partitioning, see Using date-based folder partitioning.
This setting applies if the S3 output files during a change data capture (CDC) load are written in .csv format. If set to true
for columns not included in the supplemental log, AWS DMS uses the value specified by CsvNoSupValue
. If not set or set to false
, AWS DMS uses the null value for these columns.
This setting is supported in AWS DMS versions 3.4.1 and later.
If set to true
, AWS DMS saves the transaction order for a change data capture (CDC) load on the Amazon S3 target specified by CdcPath
. For more information, see Capturing data changes (CDC) including transaction order on the S3 target.
This setting is supported in AWS DMS versions 3.4.2 and later.
A value that enables a full load to write INSERT operations to the comma-separated value (.csv) output files only to indicate how the rows were added to the source database.
DMS supports the IncludeOpForFullLoad
parameter in versions 3.1.4 and later.
For full load, records can only be inserted. By default (the false
setting), no information is recorded in these output files for a full load to indicate that the rows were inserted at the source database. If IncludeOpForFullLoad
is set to true
or y
, the INSERT is recorded as an I annotation in the first field of the .csv file. This allows the format of your target records from a full load to be consistent with the target records from a CDC load.
This setting works together with the CdcInsertsOnly
and the CdcInsertsAndUpdates
parameters for output to .csv files only. For more information about how these settings work together, see Indicating Source DB Operations in Migrated S3 Data in the Database Migration Service User Guide.
A value that enables a change data capture (CDC) load to write only INSERT operations to .csv or columnar storage (.parquet) output files. By default (the false
setting), the first field in a .csv or .parquet record contains the letter I (INSERT), U (UPDATE), or D (DELETE). These values indicate whether the row was inserted, updated, or deleted at the source database for a CDC load to the target.
If CdcInsertsOnly
is set to true
or y
, only INSERTs from the source database are migrated to the .csv or .parquet file. For .csv format only, how these INSERTs are recorded depends on the value of IncludeOpForFullLoad
. If IncludeOpForFullLoad
is set to true
, the first field of every CDC record is set to I to indicate the INSERT operation at the source. If IncludeOpForFullLoad
is set to false
, every CDC record is written without a first field to indicate the INSERT operation at the source. For more information about how these settings work together, see Indicating Source DB Operations in Migrated S3 Data in the Database Migration Service User Guide.
DMS supports the interaction described preceding between the CdcInsertsOnly
and IncludeOpForFullLoad
parameters in versions 3.1.4 and later.
CdcInsertsOnly
and CdcInsertsAndUpdates
can't both be set to true
for the same endpoint. Set either CdcInsertsOnly
or CdcInsertsAndUpdates
to true
for the same endpoint, but not both.
A value that specifies the precision of any TIMESTAMP
column values that are written to an Amazon S3 object file in .parquet format.
DMS supports the ParquetTimestampInMillisecond
parameter in versions 3.1.4 and later.
When ParquetTimestampInMillisecond
is set to true
or y
, DMS writes all TIMESTAMP
columns in a .parquet formatted file with millisecond precision. Otherwise, DMS writes them with microsecond precision.
Currently, Amazon Athena and Glue can handle only millisecond precision for TIMESTAMP
values. Set this parameter to true
for S3 endpoint object files that are .parquet formatted only if you plan to query or process the data with Athena or Glue.
DMS writes any TIMESTAMP
column values written to an S3 file in .csv format with microsecond precision.
Setting ParquetTimestampInMillisecond
has no effect on the string format of the timestamp column value that is inserted by setting the TimestampColumnName
parameter.
A value that enables a change data capture (CDC) load to write INSERT and UPDATE operations to .csv or .parquet (columnar storage) output files. The default setting is false
, but when CdcInsertsAndUpdates
is set to true
or y
, only INSERTs and UPDATEs from the source database are migrated to the .csv or .parquet file.
For .csv file format only, how these INSERTs and UPDATEs are recorded depends on the value of the IncludeOpForFullLoad
parameter. If IncludeOpForFullLoad
is set to true
, the first field of every CDC record is set to either I
or U
to indicate INSERT and UPDATE operations at the source. But if IncludeOpForFullLoad
is set to false
, CDC records are written without an indication of INSERT or UPDATE operations at the source. For more information about how these settings work together, see Indicating Source DB Operations in Migrated S3 Data in the Database Migration Service User Guide.
DMS supports the use of the CdcInsertsAndUpdates
parameter in versions 3.3.1 and later.
CdcInsertsOnly
and CdcInsertsAndUpdates
can't both be set to true
for the same endpoint. Set either CdcInsertsOnly
or CdcInsertsAndUpdates
to true
for the same endpoint, but not both.
When set to true
, this parameter partitions S3 bucket folders based on transaction commit dates. The default value is false
. For more information about date-based folder partitioning, see Using date-based folder partitioning.
This setting applies if the S3 output files during a change data capture (CDC) load are written in .csv format. If set to true
for columns not included in the supplemental log, DMS uses the value specified by CsvNoSupValue
. If not set or set to false
, DMS uses the null value for these columns.
This setting is supported in DMS versions 3.4.1 and later.
If set to true
, DMS saves the transaction order for a change data capture (CDC) load on the Amazon S3 target specified by CdcPath
. For more information, see Capturing data changes (CDC) including transaction order on the S3 target.
This setting is supported in DMS versions 3.4.2 and later.
A value that indicates if the table was reloaded (true
) or loaded as part of a new full load operation (false
).
The location of an imported Oracle Wallet certificate for use with SSL.
", - "ImportCertificateMessage$CertificateWallet": "The location of an imported Oracle Wallet certificate for use with SSL.
" + "ImportCertificateMessage$CertificateWallet": "The location of an imported Oracle Wallet certificate for use with SSL. Provide the name of a .sso
file using the fileb://
prefix. You can't provide the certificate inline.
The settings in JSON format for the DMS Transfer type source endpoint.
", "refs": { - "CreateEndpointMessage$DmsTransferSettings": "The settings in JSON format for the DMS transfer type of source endpoint.
Possible settings include the following:
ServiceAccessRoleArn
- The IAM role that has permission to access the Amazon S3 bucket.
BucketName
- The name of the S3 bucket to use.
CompressionType
- An optional parameter to use GZIP to compress the target files. To use GZIP, set this value to NONE
(the default). To keep the files uncompressed, don't use this value.
Shorthand syntax for these settings is as follows: ServiceAccessRoleArn=string,BucketName=string,CompressionType=string
JSON syntax for these settings is as follows: { \"ServiceAccessRoleArn\": \"string\", \"BucketName\": \"string\", \"CompressionType\": \"none\"|\"gzip\" }
The settings in JSON format for the DMS transfer type of source endpoint.
Possible settings include the following:
ServiceAccessRoleArn
- The IAM role that has permission to access the Amazon S3 bucket.
BucketName
- The name of the S3 bucket to use.
CompressionType
- An optional parameter to use GZIP to compress the target files. To use GZIP, set this value to NONE
(the default). To keep the files uncompressed, don't use this value.
Shorthand syntax for these settings is as follows: ServiceAccessRoleArn=string,BucketName=string,CompressionType=string
JSON syntax for these settings is as follows: { \"ServiceAccessRoleArn\": \"string\", \"BucketName\": \"string\", \"CompressionType\": \"none\"|\"gzip\" }
The settings in JSON format for the DMS transfer type of source endpoint.
Attributes include the following:
serviceAccessRoleArn - The AWS Identity and Access Management (IAM) role that has permission to access the Amazon S3 bucket.
BucketName - The name of the S3 bucket to use.
compressionType - An optional parameter to use GZIP to compress the target files. Either set this parameter to NONE (the default) or don't use it to leave the files uncompressed.
Shorthand syntax for these settings is as follows: ServiceAccessRoleArn=string ,BucketName=string,CompressionType=string
JSON syntax for these settings is as follows: { \"ServiceAccessRoleArn\": \"string\", \"BucketName\": \"string\", \"CompressionType\": \"none\"|\"gzip\" }
The settings in JSON format for the DMS transfer type of source endpoint.
Possible settings include the following:
ServiceAccessRoleArn
- The IAM role that has permission to access the Amazon S3 bucket. The role must allow the iam:PassRole
action.
BucketName
- The name of the S3 bucket to use.
Shorthand syntax for these settings is as follows: ServiceAccessRoleArn=string,BucketName=string
JSON syntax for these settings is as follows: { \"ServiceAccessRoleArn\": \"string\", \"BucketName\": \"string\", }
The settings in JSON format for the DMS transfer type of source endpoint.
Possible settings include the following:
ServiceAccessRoleArn
- The IAM role that has permission to access the Amazon S3 bucket. The role must allow the iam:PassRole
action.
BucketName
- The name of the S3 bucket to use.
Shorthand syntax for these settings is as follows: ServiceAccessRoleArn=string,BucketName=string,
JSON syntax for these settings is as follows: { \"ServiceAccessRoleArn\": \"string\", \"BucketName\": \"string\"}
The settings in JSON format for the DMS transfer type of source endpoint.
Attributes include the following:
serviceAccessRoleArn - The Identity and Access Management (IAM) role that has permission to access the Amazon S3 bucket. The role must allow the iam:PassRole
action.
BucketName - The name of the S3 bucket to use.
Shorthand syntax for these settings is as follows: ServiceAccessRoleArn=string ,BucketName=string
JSON syntax for these settings is as follows: { \"ServiceAccessRoleArn\": \"string\", \"BucketName\": \"string\"}
Settings in JSON format for the source DocumentDB endpoint. For more information about the available settings, see the configuration properties section in Using DocumentDB as a Target for AWS Database Migration Service in the AWS Database Migration Service User Guide.
" + "ModifyEndpointMessage$DocDbSettings": "Settings in JSON format for the source DocumentDB endpoint. For more information about the available settings, see the configuration properties section in Using DocumentDB as a Target for Database Migration Service in the Database Migration Service User Guide.
" } }, "DynamoDbSettings": { - "base": "Provides the Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role used to define an Amazon DynamoDB target endpoint.
", + "base": "Provides the Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role used to define an Amazon DynamoDB target endpoint.
", "refs": { - "CreateEndpointMessage$DynamoDbSettings": "Settings in JSON format for the target Amazon DynamoDB endpoint. For information about other available settings, see Using Object Mapping to Migrate Data to DynamoDB in the AWS Database Migration Service User Guide.
", + "CreateEndpointMessage$DynamoDbSettings": "Settings in JSON format for the target Amazon DynamoDB endpoint. For information about other available settings, see Using Object Mapping to Migrate Data to DynamoDB in the Database Migration Service User Guide.
", "Endpoint$DynamoDbSettings": "The settings for the DynamoDB target endpoint. For more information, see the DynamoDBSettings
structure.
Settings in JSON format for the target Amazon DynamoDB endpoint. For information about other available settings, see Using Object Mapping to Migrate Data to DynamoDB in the AWS Database Migration Service User Guide.
" + "ModifyEndpointMessage$DynamoDbSettings": "Settings in JSON format for the target Amazon DynamoDB endpoint. For information about other available settings, see Using Object Mapping to Migrate Data to DynamoDB in the Database Migration Service User Guide.
" } }, "ElasticsearchSettings": { "base": "Provides information that defines an Elasticsearch endpoint.
", "refs": { - "CreateEndpointMessage$ElasticsearchSettings": "Settings in JSON format for the target Elasticsearch endpoint. For more information about the available settings, see Extra Connection Attributes When Using Elasticsearch as a Target for AWS DMS in the AWS Database Migration Service User Guide.
", + "CreateEndpointMessage$ElasticsearchSettings": "Settings in JSON format for the target Elasticsearch endpoint. For more information about the available settings, see Extra Connection Attributes When Using Elasticsearch as a Target for DMS in the Database Migration Service User Guide.
", "Endpoint$ElasticsearchSettings": "The settings for the Elasticsearch source endpoint. For more information, see the ElasticsearchSettings
structure.
Settings in JSON format for the target Elasticsearch endpoint. For more information about the available settings, see Extra Connection Attributes When Using Elasticsearch as a Target for AWS DMS in the AWS Database Migration Service User Guide.
" + "ModifyEndpointMessage$ElasticsearchSettings": "Settings in JSON format for the target Elasticsearch endpoint. For more information about the available settings, see Extra Connection Attributes When Using Elasticsearch as a Target for DMS in the Database Migration Service User Guide.
" } }, "EncodingTypeValue": { @@ -672,12 +679,12 @@ "EncryptionModeValue": { "base": null, "refs": { - "RedshiftSettings$EncryptionMode": "The type of server-side encryption that you want to use for your data. This encryption type is part of the endpoint settings or the extra connections attributes for Amazon S3. You can choose either SSE_S3
(the default) or SSE_KMS
.
For the ModifyEndpoint
operation, you can change the existing value of the EncryptionMode
parameter from SSE_KMS
to SSE_S3
. But you can’t change the existing value from SSE_S3
to SSE_KMS
.
To use SSE_S3
, create an AWS Identity and Access Management (IAM) role with a policy that allows \"arn:aws:s3:::*\"
to use the following actions: \"s3:PutObject\", \"s3:ListBucket\"
The type of server-side encryption that you want to use for your data. This encryption type is part of the endpoint settings or the extra connections attributes for Amazon S3. You can choose either SSE_S3
(the default) or SSE_KMS
.
For the ModifyEndpoint
operation, you can change the existing value of the EncryptionMode
parameter from SSE_KMS
to SSE_S3
. But you can’t change the existing value from SSE_S3
to SSE_KMS
.
To use SSE_S3
, you need an AWS Identity and Access Management (IAM) role with permission to allow \"arn:aws:s3:::dms-*\"
to use the following actions:
s3:CreateBucket
s3:ListBucket
s3:DeleteBucket
s3:GetBucketLocation
s3:GetObject
s3:PutObject
s3:DeleteObject
s3:GetObjectVersion
s3:GetBucketPolicy
s3:PutBucketPolicy
s3:DeleteBucketPolicy
The type of server-side encryption that you want to use for your data. This encryption type is part of the endpoint settings or the extra connections attributes for Amazon S3. You can choose either SSE_S3
(the default) or SSE_KMS
.
For the ModifyEndpoint
operation, you can change the existing value of the EncryptionMode
parameter from SSE_KMS
to SSE_S3
. But you can’t change the existing value from SSE_S3
to SSE_KMS
.
To use SSE_S3
, create an Identity and Access Management (IAM) role with a policy that allows \"arn:aws:s3:::*\"
to use the following actions: \"s3:PutObject\", \"s3:ListBucket\"
The type of server-side encryption that you want to use for your data. This encryption type is part of the endpoint settings or the extra connections attributes for Amazon S3. You can choose either SSE_S3
(the default) or SSE_KMS
.
For the ModifyEndpoint
operation, you can change the existing value of the EncryptionMode
parameter from SSE_KMS
to SSE_S3
. But you can’t change the existing value from SSE_S3
to SSE_KMS
.
To use SSE_S3
, you need an Identity and Access Management (IAM) role with permission to allow \"arn:aws:s3:::dms-*\"
to use the following actions:
s3:CreateBucket
s3:ListBucket
s3:DeleteBucket
s3:GetBucketLocation
s3:GetObject
s3:PutObject
s3:DeleteObject
s3:GetObjectVersion
s3:GetBucketPolicy
s3:PutBucketPolicy
s3:DeleteBucketPolicy
Describes an endpoint of a database instance in response to operations such as the following:
CreateEndpoint
DescribeEndpoint
DescribeEndpointTypes
ModifyEndpoint
Describes an endpoint of a database instance in response to operations such as the following:
CreateEndpoint
DescribeEndpoint
ModifyEndpoint
The endpoint that was created.
", "DeleteEndpointResponse$Endpoint": "The endpoint that was deleted.
", @@ -716,7 +723,7 @@ } }, "Event": { - "base": "Describes an identifiable significant activity that affects a replication instance or task. This object can provide the message, the available event categories, the date and source of the event, and the AWS DMS resource type.
", + "base": "Describes an identifiable significant activity that affects a replication instance or task. This object can provide the message, the available event categories, the date and source of the event, and the DMS resource type.
", "refs": { "EventList$member": null } @@ -724,7 +731,7 @@ "EventCategoriesList": { "base": null, "refs": { - "CreateEventSubscriptionMessage$EventCategories": "A list of event categories for a source type that you want to subscribe to. For more information, see Working with Events and Notifications in the AWS Database Migration Service User Guide.
", + "CreateEventSubscriptionMessage$EventCategories": "A list of event categories for a source type that you want to subscribe to. For more information, see Working with Events and Notifications in the Database Migration Service User Guide.
", "DescribeEventsMessage$EventCategories": "A list of event categories for the source type that you've chosen.
", "Event$EventCategories": "The event categories available for the specified source type.
", "EventCategoryGroup$EventCategories": "A list of event categories from a source type that you've chosen.
", @@ -733,7 +740,7 @@ } }, "EventCategoryGroup": { - "base": "Lists categories of events subscribed to, and generated by, the applicable AWS DMS resource type. This data type appears in response to the DescribeEventCategories
action.
Lists categories of events subscribed to, and generated by, the applicable DMS resource type. This data type appears in response to the DescribeEventCategories
action.
Space-separated list of names for specific individual assessments that you want to exclude. These names come from the default list of individual assessments that AWS DMS supports for the associated migration task. This task is specified by ReplicationTaskArn
.
You can't set a value for Exclude
if you also set a value for IncludeOnly
in the API operation.
To identify the names of the default individual assessments that AWS DMS supports for the associated migration task, run the DescribeApplicableIndividualAssessments
operation using its own ReplicationTaskArn
request parameter.
Space-separated list of names for specific individual assessments that you want to exclude. These names come from the default list of individual assessments that DMS supports for the associated migration task. This task is specified by ReplicationTaskArn
.
You can't set a value for Exclude
if you also set a value for IncludeOnly
in the API operation.
To identify the names of the default individual assessments that DMS supports for the associated migration task, run the DescribeApplicableIndividualAssessments
operation using its own ReplicationTaskArn
request parameter.
Identifies the name and value of a filter object. This filter is used to limit the number and type of AWS DMS objects that are returned for a particular Describe*
call or similar operation. Filters are used as an optional parameter for certain API operations.
Identifies the name and value of a filter object. This filter is used to limit the number and type of DMS objects that are returned for a particular Describe*
call or similar operation. Filters are used as an optional parameter for certain API operations.
Provides information that defines an IBM Db2 LUW endpoint.
", "refs": { - "CreateEndpointMessage$IBMDb2Settings": "Settings in JSON format for the source IBM Db2 LUW endpoint. For information about other available settings, see Extra connection attributes when using Db2 LUW as a source for AWS DMS in the AWS Database Migration Service User Guide.
", + "CreateEndpointMessage$IBMDb2Settings": "Settings in JSON format for the source IBM Db2 LUW endpoint. For information about other available settings, see Extra connection attributes when using Db2 LUW as a source for DMS in the Database Migration Service User Guide.
", "Endpoint$IBMDb2Settings": "The settings for the IBM Db2 LUW source endpoint. For more information, see the IBMDb2Settings
structure.
Settings in JSON format for the source IBM Db2 LUW endpoint. For information about other available settings, see Extra connection attributes when using Db2 LUW as a source for AWS DMS in the AWS Database Migration Service User Guide.
" + "ModifyEndpointMessage$IBMDb2Settings": "Settings in JSON format for the source IBM Db2 LUW endpoint. For information about other available settings, see Extra connection attributes when using Db2 LUW as a source for DMS in the Database Migration Service User Guide.
" } }, "ImportCertificateMessage": { @@ -851,13 +858,13 @@ "IncludeTestList": { "base": null, "refs": { - "StartReplicationTaskAssessmentRunMessage$IncludeOnly": "Space-separated list of names for specific individual assessments that you want to include. These names come from the default list of individual assessments that AWS DMS supports for the associated migration task. This task is specified by ReplicationTaskArn
.
You can't set a value for IncludeOnly
if you also set a value for Exclude
in the API operation.
To identify the names of the default individual assessments that AWS DMS supports for the associated migration task, run the DescribeApplicableIndividualAssessments
operation using its own ReplicationTaskArn
request parameter.
Space-separated list of names for specific individual assessments that you want to include. These names come from the default list of individual assessments that DMS supports for the associated migration task. This task is specified by ReplicationTaskArn
.
You can't set a value for IncludeOnly
if you also set a value for Exclude
in the API operation.
To identify the names of the default individual assessments that DMS supports for the associated migration task, run the DescribeApplicableIndividualAssessments
operation using its own ReplicationTaskArn
request parameter.
List of names for the individual assessments supported by the premigration assessment run that you start based on the specified request parameters. For more information on the available individual assessments, including compatibility with different migration task configurations, see Working with premigration assessment runs in the AWS Database Migration Service User Guide.
" + "DescribeApplicableIndividualAssessmentsResponse$IndividualAssessmentNames": "List of names for the individual assessments supported by the premigration assessment run that you start based on the specified request parameters. For more information on the available individual assessments, including compatibility with different migration task configurations, see Working with premigration assessment runs in the Database Migration Service User Guide.
" } }, "InsufficientResourceCapacityFault": { @@ -915,7 +922,7 @@ "Endpoint$Port": "The port value used to access the endpoint.
", "EndpointSetting$IntValueMin": "The minimum value of an endpoint setting that is of type int
.
The maximum value of an endpoint setting that is of type int
.
Endpoint TCP port.
", + "IBMDb2Settings$Port": "Endpoint TCP port. The default value is 50000.
", "IBMDb2Settings$MaxKBytesPerRead": "Maximum number of bytes per read, as a NUMBER value. The default is 64 KB.
", "KafkaSettings$MessageMaxBytes": "The maximum size in bytes for records created on the endpoint The default is 1,000,000.
", "MicrosoftSQLServerSettings$Port": "Endpoint TCP port.
", @@ -923,26 +930,28 @@ "ModifyEndpointMessage$Port": "The port used by the endpoint database.
", "ModifyReplicationInstanceMessage$AllocatedStorage": "The amount of storage (in gigabytes) to be allocated for the replication instance.
", "MongoDbSettings$Port": "The port value for the MongoDB source endpoint.
", - "MySQLSettings$EventsPollInterval": "Specifies how often to check the binary log for new changes/events when the database is idle.
Example: eventsPollInterval=5;
In the example, AWS DMS checks for changes in the binary logs every five seconds.
", + "MySQLSettings$EventsPollInterval": "Specifies how often to check the binary log for new changes/events when the database is idle.
Example: eventsPollInterval=5;
In the example, DMS checks for changes in the binary logs every five seconds.
", "MySQLSettings$MaxFileSize": "Specifies the maximum size (in KB) of any .csv file used to transfer data to a MySQL-compatible database.
Example: maxFileSize=512
Improves performance when loading data into the MySQL-compatible target database. Specifies how many threads to use to load the data into the MySQL-compatible target database. Setting a large number of threads can have an adverse effect on database performance, because a separate connection is required for each thread.
Example: parallelLoadThreads=1
Endpoint TCP port.
", - "NeptuneSettings$ErrorRetryDuration": "The number of milliseconds for AWS DMS to wait to retry a bulk-load of migrated graph data to the Neptune target database before raising an error. The default is 250.
", - "NeptuneSettings$MaxFileSize": "The maximum size in kilobytes of migrated graph data stored in a .csv file before AWS DMS bulk-loads the data to the Neptune target database. The default is 1,048,576 KB. If the bulk load is successful, AWS DMS clears the bucket, ready to store the next batch of migrated graph data.
", - "NeptuneSettings$MaxRetryCount": "The number of times for AWS DMS to retry a bulk load of migrated graph data to the Neptune target database before raising an error. The default is 5.
", - "OracleSettings$ArchivedLogDestId": "Specifies the destination of the archived redo logs. The value should be the same as the DEST_ID number in the v$archived_log table. When working with multiple log destinations (DEST_ID), we recommend that you to specify an archived redo logs location identifier. Doing this improves performance by ensuring that the correct logs are accessed from the outset.
", - "OracleSettings$AdditionalArchivedLogDestId": "Set this attribute with archivedLogDestId
in a primary/ standby setup. This attribute is useful in the case of a switchover. In this case, AWS DMS needs to know which destination to get archive redo logs from to read changes. This need arises because the previous primary instance is now a standby instance after switchover.
Set this attribute to change the number of threads that DMS configures to perform a Change Data Capture (CDC) load using Oracle Automatic Storage Management (ASM). You can specify an integer value between 2 (the default) and 8 (the maximum). Use this attribute together with the readAheadBlocks
attribute.
Set this attribute to change the number of read-ahead blocks that DMS configures to perform a Change Data Capture (CDC) load using Oracle Automatic Storage Management (ASM). You can specify an integer value between 1000 (the default) and 200,000 (the maximum).
", + "NeptuneSettings$ErrorRetryDuration": "The number of milliseconds for DMS to wait to retry a bulk-load of migrated graph data to the Neptune target database before raising an error. The default is 250.
", + "NeptuneSettings$MaxFileSize": "The maximum size in kilobytes of migrated graph data stored in a .csv file before DMS bulk-loads the data to the Neptune target database. The default is 1,048,576 KB. If the bulk load is successful, DMS clears the bucket, ready to store the next batch of migrated graph data.
", + "NeptuneSettings$MaxRetryCount": "The number of times for DMS to retry a bulk load of migrated graph data to the Neptune target database before raising an error. The default is 5.
", + "OracleSettings$ArchivedLogDestId": "Specifies the ID of the destination for the archived redo logs. This value should be the same as a number in the dest_id column of the v$archived_log view. If you work with an additional redo log destination, use the AdditionalArchivedLogDestId
option to specify the additional destination ID. Doing this improves performance by ensuring that the correct logs are accessed from the outset.
Set this attribute with ArchivedLogDestId
in a primary/standby setup. This attribute is useful in the case of a switchover. In this case, DMS needs to know which destination to get archive redo logs from to read changes. This need arises because the previous primary instance is now a standby instance after switchover.
Although DMS supports the use of the Oracle RESETLOGS
option to open the database, never use RESETLOGS
unless necessary. For additional information about RESETLOGS
, see RMAN Data Repair Concepts in the Oracle Database Backup and Recovery User's Guide.
Set this attribute to change the number of threads that DMS configures to perform a change data capture (CDC) load using Oracle Automatic Storage Management (ASM). You can specify an integer value between 2 (the default) and 8 (the maximum). Use this attribute together with the readAheadBlocks
attribute.
Set this attribute to change the number of read-ahead blocks that DMS configures to perform a change data capture (CDC) load using Oracle Automatic Storage Management (ASM). You can specify an integer value between 1000 (the default) and 200,000 (the maximum).
", "OracleSettings$NumberDatatypeScale": "Specifies the number scale. You can select a scale up to 38, or you can select FLOAT. By default, the NUMBER data type is converted to precision 38, scale 10.
Example: numberDataTypeScale=12
Endpoint TCP port.
", "OracleSettings$RetryInterval": "Specifies the number of seconds that the system waits before resending a query.
Example: retryInterval=6;
Use this attribute to specify a time in minutes for the delay in standby sync. If the source is an Oracle Active Data Guard standby database, use this attribute to specify the time lag between primary and standby databases.
In DMS, you can create an Oracle CDC task that uses an Active Data Guard standby instance as a source for replicating ongoing changes. Doing this eliminates the need to connect to an active database that might be in production.
", "PostgreSQLSettings$MaxFileSize": "Specifies the maximum size (in KB) of any .csv file used to transfer data to PostgreSQL.
Example: maxFileSize=512
Sets the client statement timeout for the PostgreSQL instance, in seconds. The default value is 60 seconds.
Example: executeTimeout=100;
Sets the WAL heartbeat frequency (in minutes).
", "PostgreSQLSettings$Port": "Endpoint TCP port.
", "RedshiftSettings$ConnectionTimeout": "A value that sets the amount of time to wait (in milliseconds) before timing out, beginning from when you initially establish a connection.
", "RedshiftSettings$FileTransferUploadStreams": "The number of threads used to upload a single file. This parameter accepts a value from 1 through 64. It defaults to 10.
The number of parallel streams used to upload a single .csv file to an S3 bucket using S3 Multipart Upload. For more information, see Multipart upload overview.
FileTransferUploadStreams
accepts a value from 1 through 64. It defaults to 10.
The amount of time to wait (in milliseconds) before timing out of operations performed by AWS DMS on a Redshift cluster, such as Redshift COPY, INSERT, DELETE, and UPDATE.
", + "RedshiftSettings$LoadTimeout": "The amount of time to wait (in milliseconds) before timing out of operations performed by DMS on a Redshift cluster, such as Redshift COPY, INSERT, DELETE, and UPDATE.
", "RedshiftSettings$MaxFileSize": "The maximum size (in KB) of any .csv file used to load data on an S3 bucket and transfer data to Amazon Redshift. It defaults to 1048576KB (1 GB).
", "RedshiftSettings$Port": "The port number for Amazon Redshift. The default value is 5439.
", "RedshiftSettings$WriteBufferSize": "The size (in KB) of the in-memory file write buffer used when generating .csv files on the local disk at the DMS replication instance. The default value is 1000 (buffer size is 1000KB).
", @@ -979,27 +988,27 @@ } }, "KMSFault": { - "base": "An AWS Key Management Service (AWS KMS) error is preventing access to AWS KMS.
", + "base": "An Key Management Service (KMS) error is preventing access to KMS.
", "refs": { } }, "KMSInvalidStateFault": { - "base": "The state of the specified AWS KMS resource isn't valid for this request.
", + "base": "The state of the specified KMS resource isn't valid for this request.
", "refs": { } }, "KMSKeyNotAccessibleFault": { - "base": "AWS DMS cannot access the AWS KMS key.
", + "base": "DMS cannot access the KMS key.
", "refs": { } }, "KMSNotFoundFault": { - "base": "The specified AWS KMS entity or resource can't be found.
", + "base": "The specified KMS entity or resource can't be found.
", "refs": { } }, "KMSThrottlingFault": { - "base": "This request triggered AWS KMS request throttling.
", + "base": "This request triggered KMS request throttling.
", "refs": { } }, @@ -1012,9 +1021,9 @@ "KafkaSettings": { "base": "Provides information that describes an Apache Kafka endpoint. This information includes the output format of records applied to the endpoint and details of transaction and control table data information.
", "refs": { - "CreateEndpointMessage$KafkaSettings": "Settings in JSON format for the target Apache Kafka endpoint. For more information about the available settings, see Using Apache Kafka as a Target for AWS Database Migration Service in the AWS Database Migration Service User Guide.
", + "CreateEndpointMessage$KafkaSettings": "Settings in JSON format for the target Apache Kafka endpoint. For more information about the available settings, see Using object mapping to migrate data to a Kafka topic in the Database Migration Service User Guide.
", "Endpoint$KafkaSettings": "The settings for the Apache Kafka target endpoint. For more information, see the KafkaSettings
structure.
Settings in JSON format for the target Apache Kafka endpoint. For more information about the available settings, see Using Apache Kafka as a Target for AWS Database Migration Service in the AWS Database Migration Service User Guide.
" + "ModifyEndpointMessage$KafkaSettings": "Settings in JSON format for the target Apache Kafka endpoint. For more information about the available settings, see Using object mapping to migrate data to a Kafka topic in the Database Migration Service User Guide.
" } }, "KeyList": { @@ -1026,9 +1035,9 @@ "KinesisSettings": { "base": "Provides information that describes an Amazon Kinesis Data Stream endpoint. This information includes the output format of records applied to the endpoint and details of transaction and control table data information.
", "refs": { - "CreateEndpointMessage$KinesisSettings": "Settings in JSON format for the target endpoint for Amazon Kinesis Data Streams. For more information about the available settings, see Using Amazon Kinesis Data Streams as a Target for AWS Database Migration Service in the AWS Database Migration Service User Guide.
", + "CreateEndpointMessage$KinesisSettings": "Settings in JSON format for the target endpoint for Amazon Kinesis Data Streams. For more information about the available settings, see Using object mapping to migrate data to a Kinesis data stream in the Database Migration Service User Guide.
", "Endpoint$KinesisSettings": "The settings for the Amazon Kinesis target endpoint. For more information, see the KinesisSettings
structure.
Settings in JSON format for the target endpoint for Amazon Kinesis Data Streams. For more information about the available settings, see Using Amazon Kinesis Data Streams as a Target for AWS Database Migration Service in the AWS Database Migration Service User Guide.
" + "ModifyEndpointMessage$KinesisSettings": "Settings in JSON format for the target endpoint for Amazon Kinesis Data Streams. For more information about the available settings, see Using object mapping to migrate data to a Kinesis data stream in the Database Migration Service User Guide.
" } }, "ListTagsForResourceMessage": { @@ -1070,9 +1079,9 @@ "MicrosoftSQLServerSettings": { "base": "Provides information that defines a Microsoft SQL Server endpoint.
", "refs": { - "CreateEndpointMessage$MicrosoftSQLServerSettings": "Settings in JSON format for the source and target Microsoft SQL Server endpoint. For information about other available settings, see Extra connection attributes when using SQL Server as a source for AWS DMS and Extra connection attributes when using SQL Server as a target for AWS DMS in the AWS Database Migration Service User Guide.
", + "CreateEndpointMessage$MicrosoftSQLServerSettings": "Settings in JSON format for the source and target Microsoft SQL Server endpoint. For information about other available settings, see Extra connection attributes when using SQL Server as a source for DMS and Extra connection attributes when using SQL Server as a target for DMS in the Database Migration Service User Guide.
", "Endpoint$MicrosoftSQLServerSettings": "The settings for the Microsoft SQL Server source and target endpoint. For more information, see the MicrosoftSQLServerSettings
structure.
Settings in JSON format for the source and target Microsoft SQL Server endpoint. For information about other available settings, see Extra connection attributes when using SQL Server as a source for AWS DMS and Extra connection attributes when using SQL Server as a target for AWS DMS in the AWS Database Migration Service User Guide.
" + "ModifyEndpointMessage$MicrosoftSQLServerSettings": "Settings in JSON format for the source and target Microsoft SQL Server endpoint. For information about other available settings, see Extra connection attributes when using SQL Server as a source for DMS and Extra connection attributes when using SQL Server as a target for DMS in the Database Migration Service User Guide.
" } }, "MigrationTypeValue": { @@ -1137,9 +1146,9 @@ "MongoDbSettings": { "base": "Provides information that defines a MongoDB endpoint.
", "refs": { - "CreateEndpointMessage$MongoDbSettings": "Settings in JSON format for the source MongoDB endpoint. For more information about the available settings, see Using MongoDB as a Target for AWS Database Migration Service in the AWS Database Migration Service User Guide.
", + "CreateEndpointMessage$MongoDbSettings": "Settings in JSON format for the source MongoDB endpoint. For more information about the available settings, see Endpoint configuration settings when using MongoDB as a source for Database Migration Service in the Database Migration Service User Guide.
", "Endpoint$MongoDbSettings": "The settings for the MongoDB source endpoint. For more information, see the MongoDbSettings
structure.
Settings in JSON format for the source MongoDB endpoint. For more information about the available settings, see the configuration properties section in Using MongoDB as a Target for AWS Database Migration Service in the AWS Database Migration Service User Guide.
" + "ModifyEndpointMessage$MongoDbSettings": "Settings in JSON format for the source MongoDB endpoint. For more information about the available settings, see the configuration properties section in Endpoint configuration settings when using MongoDB as a source for Database Migration Service in the Database Migration Service User Guide.
" } }, "MoveReplicationTaskMessage": { @@ -1155,17 +1164,17 @@ "MySQLSettings": { "base": "Provides information that defines a MySQL endpoint.
", "refs": { - "CreateEndpointMessage$MySQLSettings": "Settings in JSON format for the source and target MySQL endpoint. For information about other available settings, see Extra connection attributes when using MySQL as a source for AWS DMS and Extra connection attributes when using a MySQL-compatible database as a target for AWS DMS in the AWS Database Migration Service User Guide.
", + "CreateEndpointMessage$MySQLSettings": "Settings in JSON format for the source and target MySQL endpoint. For information about other available settings, see Extra connection attributes when using MySQL as a source for DMS and Extra connection attributes when using a MySQL-compatible database as a target for DMS in the Database Migration Service User Guide.
", "Endpoint$MySQLSettings": "The settings for the MySQL source and target endpoint. For more information, see the MySQLSettings
structure.
Settings in JSON format for the source and target MySQL endpoint. For information about other available settings, see Extra connection attributes when using MySQL as a source for AWS DMS and Extra connection attributes when using a MySQL-compatible database as a target for AWS DMS in the AWS Database Migration Service User Guide.
" + "ModifyEndpointMessage$MySQLSettings": "Settings in JSON format for the source and target MySQL endpoint. For information about other available settings, see Extra connection attributes when using MySQL as a source for DMS and Extra connection attributes when using a MySQL-compatible database as a target for DMS in the Database Migration Service User Guide.
" } }, "NeptuneSettings": { "base": "Provides information that defines an Amazon Neptune endpoint.
", "refs": { - "CreateEndpointMessage$NeptuneSettings": "Settings in JSON format for the target Amazon Neptune endpoint. For more information about the available settings, see Specifying Endpoint Settings for Amazon Neptune as a Target in the AWS Database Migration Service User Guide.
", + "CreateEndpointMessage$NeptuneSettings": "Settings in JSON format for the target Amazon Neptune endpoint. For more information about the available settings, see Specifying graph-mapping rules using Gremlin and R2RML for Amazon Neptune as a target in the Database Migration Service User Guide.
", "Endpoint$NeptuneSettings": "The settings for the Amazon Neptune target endpoint. For more information, see the NeptuneSettings
structure.
Settings in JSON format for the target Amazon Neptune endpoint. For more information about the available settings, see Specifying Endpoint Settings for Amazon Neptune as a Target in the AWS Database Migration Service User Guide.
" + "ModifyEndpointMessage$NeptuneSettings": "Settings in JSON format for the target Amazon Neptune endpoint. For more information about the available settings, see Specifying graph-mapping rules using Gremlin and R2RML for Amazon Neptune as a target in the Database Migration Service User Guide.
" } }, "NestingLevelValue": { @@ -1178,9 +1187,9 @@ "OracleSettings": { "base": "Provides information that defines an Oracle endpoint.
", "refs": { - "CreateEndpointMessage$OracleSettings": "Settings in JSON format for the source and target Oracle endpoint. For information about other available settings, see Extra connection attributes when using Oracle as a source for AWS DMS and Extra connection attributes when using Oracle as a target for AWS DMS in the AWS Database Migration Service User Guide.
", + "CreateEndpointMessage$OracleSettings": "Settings in JSON format for the source and target Oracle endpoint. For information about other available settings, see Extra connection attributes when using Oracle as a source for DMS and Extra connection attributes when using Oracle as a target for DMS in the Database Migration Service User Guide.
", "Endpoint$OracleSettings": "The settings for the Oracle source and target endpoint. For more information, see the OracleSettings
structure.
Settings in JSON format for the source and target Oracle endpoint. For information about other available settings, see Extra connection attributes when using Oracle as a source for AWS DMS and Extra connection attributes when using Oracle as a target for AWS DMS in the AWS Database Migration Service User Guide.
" + "ModifyEndpointMessage$OracleSettings": "Settings in JSON format for the source and target Oracle endpoint. For information about other available settings, see Extra connection attributes when using Oracle as a source for DMS and Extra connection attributes when using Oracle as a target for DMS in the Database Migration Service User Guide.
" } }, "OrderableReplicationInstance": { @@ -1202,7 +1211,7 @@ } }, "PendingMaintenanceAction": { - "base": "Describes a maintenance action pending for an AWS DMS resource, including when and how it will be applied. This data type is a response element to the DescribePendingMaintenanceActions
operation.
Describes a maintenance action pending for an DMS resource, including when and how it will be applied. This data type is a response element to the DescribePendingMaintenanceActions
operation.
The pending maintenance action.
" } }, + "PluginNameValue": { + "base": null, + "refs": { + "PostgreSQLSettings$PluginName": "Specifies the plugin to use to create a replication slot.
" + } + }, "PostgreSQLSettings": { "base": "Provides information that defines a PostgreSQL endpoint.
", "refs": { - "CreateEndpointMessage$PostgreSQLSettings": "Settings in JSON format for the source and target PostgreSQL endpoint. For information about other available settings, see Extra connection attributes when using PostgreSQL as a source for AWS DMS and Extra connection attributes when using PostgreSQL as a target for AWS DMS in the AWS Database Migration Service User Guide.
", + "CreateEndpointMessage$PostgreSQLSettings": "Settings in JSON format for the source and target PostgreSQL endpoint. For information about other available settings, see Extra connection attributes when using PostgreSQL as a source for DMS and Extra connection attributes when using PostgreSQL as a target for DMS in the Database Migration Service User Guide.
", "Endpoint$PostgreSQLSettings": "The settings for the PostgreSQL source and target endpoint. For more information, see the PostgreSQLSettings
structure.
Settings in JSON format for the source and target PostgreSQL endpoint. For information about other available settings, see Extra connection attributes when using PostgreSQL as a source for AWS DMS and Extra connection attributes when using PostgreSQL as a target for AWS DMS in the AWS Database Migration Service User Guide.
" + "ModifyEndpointMessage$PostgreSQLSettings": "Settings in JSON format for the source and target PostgreSQL endpoint. For information about other available settings, see Extra connection attributes when using PostgreSQL as a source for DMS and Extra connection attributes when using PostgreSQL as a target for DMS in the Database Migration Service User Guide.
" } }, "RebootReplicationInstanceMessage": { @@ -1271,7 +1286,7 @@ "ReleaseStatusValues": { "base": null, "refs": { - "OrderableReplicationInstance$ReleaseStatus": "The value returned when the specified EngineVersion
of the replication instance is in Beta or test mode. This indicates some features might not work as expected.
AWS DMS supports the ReleaseStatus
parameter in versions 3.1.4 and later.
The value returned when the specified EngineVersion
of the replication instance is in Beta or test mode. This indicates some features might not work as expected.
DMS supports the ReleaseStatus
parameter in versions 3.1.4 and later.
Removes one or more tags from an AWS DMS resource.
", + "base": "Removes one or more tags from an DMS resource.
", "refs": { } }, @@ -1462,9 +1477,9 @@ } }, "ResourcePendingMaintenanceActions": { - "base": "Identifies an AWS DMS resource and any pending actions for it.
", + "base": "Identifies an DMS resource and any pending actions for it.
", "refs": { - "ApplyPendingMaintenanceActionResponse$ResourcePendingMaintenanceActions": "The AWS DMS resource that the pending maintenance action will be applied to.
", + "ApplyPendingMaintenanceActionResponse$ResourcePendingMaintenanceActions": "The DMS resource that the pending maintenance action will be applied to.
", "PendingMaintenanceActions$member": null } }, @@ -1486,9 +1501,9 @@ "S3Settings": { "base": "Settings for exporting data to Amazon S3.
", "refs": { - "CreateEndpointMessage$S3Settings": "Settings in JSON format for the target Amazon S3 endpoint. For more information about the available settings, see Extra Connection Attributes When Using Amazon S3 as a Target for AWS DMS in the AWS Database Migration Service User Guide.
", + "CreateEndpointMessage$S3Settings": "Settings in JSON format for the target Amazon S3 endpoint. For more information about the available settings, see Extra Connection Attributes When Using Amazon S3 as a Target for DMS in the Database Migration Service User Guide.
", "Endpoint$S3Settings": "The settings for the S3 target endpoint. For more information, see the S3Settings
structure.
Settings in JSON format for the target Amazon S3 endpoint. For more information about the available settings, see Extra Connection Attributes When Using Amazon S3 as a Target for AWS DMS in the AWS Database Migration Service User Guide.
" + "ModifyEndpointMessage$S3Settings": "Settings in JSON format for the target Amazon S3 endpoint. For more information about the available settings, see Extra Connection Attributes When Using Amazon S3 as a Target for DMS in the Database Migration Service User Guide.
" } }, "SNSInvalidTopicFault": { @@ -1504,7 +1519,7 @@ "SafeguardPolicy": { "base": null, "refs": { - "MicrosoftSQLServerSettings$SafeguardPolicy": "Use this attribute to minimize the need to access the backup log and enable AWS DMS to prevent truncation using one of the following two methods.
Start transactions in the database: This is the default method. When this method is used, AWS DMS prevents TLOG truncation by mimicking a transaction in the database. As long as such a transaction is open, changes that appear after the transaction started aren't truncated. If you need Microsoft Replication to be enabled in your database, then you must choose this method.
Exclusively use sp_repldone within a single task: When this method is used, AWS DMS reads the changes and then uses sp_repldone to mark the TLOG transactions as ready for truncation. Although this method doesn't involve any transactional activities, it can only be used when Microsoft Replication isn't running. Also, when using this method, only one AWS DMS task can access the database at any given time. Therefore, if you need to run parallel AWS DMS tasks against the same database, use the default method.
" + "MicrosoftSQLServerSettings$SafeguardPolicy": "Use this attribute to minimize the need to access the backup log and enable DMS to prevent truncation using one of the following two methods.
Start transactions in the database: This is the default method. When this method is used, DMS prevents TLOG truncation by mimicking a transaction in the database. As long as such a transaction is open, changes that appear after the transaction started aren't truncated. If you need Microsoft Replication to be enabled in your database, then you must choose this method.
Exclusively use sp_repldone within a single task: When this method is used, DMS reads the changes and then uses sp_repldone to mark the TLOG transactions as ready for truncation. Although this method doesn't involve any transactional activities, it can only be used when Microsoft Replication isn't running. Also, when using this method, only one DMS task can access the database at any given time. Therefore, if you need to run parallel DMS tasks against the same database, use the default method.
" } }, "SchemaList": { @@ -1528,7 +1543,7 @@ "MySQLSettings$Password": "Endpoint connection password.
", "OracleSettings$AsmPassword": "For an Oracle source endpoint, your Oracle Automatic Storage Management (ASM) password. You can set this value from the asm_user_password
value. You set this value as part of the comma-separated value that you set to the Password
request parameter when you create the endpoint to access transaction logs using Binary Reader. For more information, see Configuration for change data capture (CDC) on an Oracle source database.
Endpoint connection password.
", - "OracleSettings$SecurityDbEncryption": "For an Oracle source endpoint, the transparent data encryption (TDE) password required by AWM DMS to access Oracle redo logs encrypted by TDE using Binary Reader. It is also the TDE_Password
part of the comma-separated value you set to the Password
request parameter when you create the endpoint. The SecurityDbEncryptian
setting is related to this SecurityDbEncryptionName
setting. For more information, see Supported encryption methods for using Oracle as a source for AWS DMS in the AWS Database Migration Service User Guide.
For an Oracle source endpoint, the transparent data encryption (TDE) password required by AWM DMS to access Oracle redo logs encrypted by TDE using Binary Reader. It is also the TDE_Password
part of the comma-separated value you set to the Password
request parameter when you create the endpoint. The SecurityDbEncryptian
setting is related to this SecurityDbEncryptionName
setting. For more information, see Supported encryption methods for using Oracle as a source for DMS in the Database Migration Service User Guide.
Endpoint connection password.
", "RedshiftSettings$Password": "The password for the user named in the username
property.
Endpoint connection password.
" @@ -1537,15 +1552,15 @@ "SourceIdsList": { "base": null, "refs": { - "CreateEventSubscriptionMessage$SourceIds": "A list of identifiers for which AWS DMS provides notification events.
If you don't specify a value, notifications are provided for all sources.
If you specify multiple values, they must be of the same type. For example, if you specify a database instance ID, then all of the other values must be database instance IDs.
", + "CreateEventSubscriptionMessage$SourceIds": "A list of identifiers for which DMS provides notification events.
If you don't specify a value, notifications are provided for all sources.
If you specify multiple values, they must be of the same type. For example, if you specify a database instance ID, then all of the other values must be database instance IDs.
", "EventSubscription$SourceIdsList": "A list of source Ids for the event subscription.
" } }, "SourceType": { "base": null, "refs": { - "DescribeEventsMessage$SourceType": "The type of AWS DMS resource that generates events.
Valid values: replication-instance | replication-task
", - "Event$SourceType": "The type of AWS DMS resource that generates events.
Valid values: replication-instance | endpoint | replication-task
" + "DescribeEventsMessage$SourceType": "The type of DMS resource that generates events.
Valid values: replication-instance | replication-task
", + "Event$SourceType": "The type of DMS resource that generates events.
Valid values: replication-instance | endpoint | replication-task
" } }, "StartReplicationTaskAssessmentMessage": { @@ -1602,9 +1617,9 @@ "String": { "base": null, "refs": { - "AccountQuota$AccountQuotaName": "The name of the AWS DMS quota for this AWS account.
", - "AddTagsToResourceMessage$ResourceArn": "Identifies the AWS DMS resource to which tags should be added. The value for this parameter is an Amazon Resource Name (ARN).
For AWS DMS, you can tag a replication instance, an endpoint, or a replication task.
", - "ApplyPendingMaintenanceActionMessage$ReplicationInstanceArn": "The Amazon Resource Name (ARN) of the AWS DMS resource that the pending maintenance action applies to.
", + "AccountQuota$AccountQuotaName": "The name of the DMS quota for this account.
", + "AddTagsToResourceMessage$ResourceArn": "Identifies the DMS resource to which tags should be added. The value for this parameter is an Amazon Resource Name (ARN).
For DMS, you can tag a replication instance, an endpoint, or a replication task.
", + "ApplyPendingMaintenanceActionMessage$ReplicationInstanceArn": "The Amazon Resource Name (ARN) of the DMS resource that the pending maintenance action applies to.
", "ApplyPendingMaintenanceActionMessage$ApplyAction": "The pending maintenance action to apply to this resource.
", "ApplyPendingMaintenanceActionMessage$OptInType": "A value that specifies the type of opt-in request, or undoes an opt-in request. You can't undo an opt-in request of type immediate
.
Valid values:
immediate
- Apply the maintenance action immediately.
next-maintenance
- Apply the maintenance action during the next maintenance window for the resource.
undo-opt-in
- Cancel any existing next-maintenance
opt-in requests.
The name of the Availability Zone.
", @@ -1625,37 +1640,37 @@ "CreateEndpointMessage$EngineName": "The type of engine for the endpoint. Valid values, depending on the EndpointType
value, include \"mysql\"
, \"oracle\"
, \"postgres\"
, \"mariadb\"
, \"aurora\"
, \"aurora-postgresql\"
, \"redshift\"
, \"s3\"
, \"db2\"
, \"azuredb\"
, \"sybase\"
, \"dynamodb\"
, \"mongodb\"
, \"kinesis\"
, \"kafka\"
, \"elasticsearch\"
, \"docdb\"
, \"sqlserver\"
, and \"neptune\"
.
The user name to be used to log in to the endpoint database.
", "CreateEndpointMessage$ServerName": "The name of the server where the endpoint database resides.
", - "CreateEndpointMessage$DatabaseName": "The name of the endpoint database.
", - "CreateEndpointMessage$ExtraConnectionAttributes": "Additional attributes associated with the connection. Each attribute is specified as a name-value pair associated by an equal sign (=). Multiple attributes are separated by a semicolon (;) with no additional white space. For information on the attributes available for connecting your source or target endpoint, see Working with AWS DMS Endpoints in the AWS Database Migration Service User Guide.
", - "CreateEndpointMessage$KmsKeyId": "An AWS KMS key identifier that is used to encrypt the connection parameters for the endpoint.
If you don't specify a value for the KmsKeyId
parameter, then AWS DMS uses your default encryption key.
AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS Region.
", + "CreateEndpointMessage$DatabaseName": "The name of the endpoint database. For a MySQL source or target endpoint, do not specify DatabaseName.
", + "CreateEndpointMessage$ExtraConnectionAttributes": "Additional attributes associated with the connection. Each attribute is specified as a name-value pair associated by an equal sign (=). Multiple attributes are separated by a semicolon (;) with no additional white space. For information on the attributes available for connecting your source or target endpoint, see Working with DMS Endpoints in the Database Migration Service User Guide.
", + "CreateEndpointMessage$KmsKeyId": "A KMS key identifier that is used to encrypt the connection parameters for the endpoint.
If you don't specify a value for the KmsKeyId
parameter, then DMS uses your default encryption key.
KMS creates the default encryption key for your account. Your account has a different default encryption key for each Region.
", "CreateEndpointMessage$CertificateArn": "The Amazon Resource Name (ARN) for the certificate.
", - "CreateEndpointMessage$ServiceAccessRoleArn": "The Amazon Resource Name (ARN) for the service access role that you want to use to create the endpoint.
", + "CreateEndpointMessage$ServiceAccessRoleArn": " The Amazon Resource Name (ARN) for the service access role that you want to use to create the endpoint. The role must allow the iam:PassRole
action.
The external table definition.
", - "CreateEndpointMessage$ResourceIdentifier": "A friendly name for the resource identifier at the end of the EndpointArn
response parameter that is returned in the created Endpoint
object. The value for this parameter can have up to 31 characters. It can contain only ASCII letters, digits, and hyphen ('-'). Also, it can't end with a hyphen or contain two consecutive hyphens, and can only begin with a letter, such as Example-App-ARN1
. For example, this value might result in the EndpointArn
value arn:aws:dms:eu-west-1:012345678901:rep:Example-App-ARN1
. If you don't specify a ResourceIdentifier
value, AWS DMS generates a default identifier value for the end of EndpointArn
.
The name of the AWS DMS event notification subscription. This name must be less than 255 characters.
", + "CreateEndpointMessage$ResourceIdentifier": "A friendly name for the resource identifier at the end of the EndpointArn
response parameter that is returned in the created Endpoint
object. The value for this parameter can have up to 31 characters. It can contain only ASCII letters, digits, and hyphen ('-'). Also, it can't end with a hyphen or contain two consecutive hyphens, and can only begin with a letter, such as Example-App-ARN1
. For example, this value might result in the EndpointArn
value arn:aws:dms:eu-west-1:012345678901:rep:Example-App-ARN1
. If you don't specify a ResourceIdentifier
value, DMS generates a default identifier value for the end of EndpointArn
.
The name of the DMS event notification subscription. This name must be less than 255 characters.
", "CreateEventSubscriptionMessage$SnsTopicArn": "The Amazon Resource Name (ARN) of the Amazon SNS topic created for event notification. The ARN is created by Amazon SNS when you create a topic and subscribe to it.
", - "CreateEventSubscriptionMessage$SourceType": " The type of AWS DMS resource that generates the events. For example, if you want to be notified of events generated by a replication instance, you set this parameter to replication-instance
. If this value isn't specified, all events are returned.
Valid values: replication-instance
| replication-task
The type of DMS resource that generates the events. For example, if you want to be notified of events generated by a replication instance, you set this parameter to replication-instance
. If this value isn't specified, all events are returned.
Valid values: replication-instance
| replication-task
The replication instance identifier. This parameter is stored as a lowercase string.
Constraints:
Must contain 1-63 alphanumeric characters or hyphens.
First character must be a letter.
Can't end with a hyphen or contain two consecutive hyphens.
Example: myrepinstance
The compute and memory capacity of the replication instance as defined for the specified replication instance class. For example to specify the instance class dms.c4.large, set this parameter to \"dms.c4.large\"
.
For more information on the settings and capacities for the available replication instance classes, see Selecting the right AWS DMS replication instance for your migration.
", - "CreateReplicationInstanceMessage$AvailabilityZone": "The Availability Zone where the replication instance will be created. The default value is a random, system-chosen Availability Zone in the endpoint's AWS Region, for example: us-east-1d
The compute and memory capacity of the replication instance as defined for the specified replication instance class. For example to specify the instance class dms.c4.large, set this parameter to \"dms.c4.large\"
.
For more information on the settings and capacities for the available replication instance classes, see Selecting the right DMS replication instance for your migration.
", + "CreateReplicationInstanceMessage$AvailabilityZone": "The Availability Zone where the replication instance will be created. The default value is a random, system-chosen Availability Zone in the endpoint's Region, for example: us-east-1d
A subnet group to associate with the replication instance.
", - "CreateReplicationInstanceMessage$PreferredMaintenanceWindow": "The weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).
Format: ddd:hh24:mi-ddd:hh24:mi
Default: A 30-minute window selected at random from an 8-hour block of time per AWS Region, occurring on a random day of the week.
Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun
Constraints: Minimum 30-minute window.
", + "CreateReplicationInstanceMessage$PreferredMaintenanceWindow": "The weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).
Format: ddd:hh24:mi-ddd:hh24:mi
Default: A 30-minute window selected at random from an 8-hour block of time per Region, occurring on a random day of the week.
Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun
Constraints: Minimum 30-minute window.
", "CreateReplicationInstanceMessage$EngineVersion": "The engine version number of the replication instance.
If an engine version number is not specified when a replication instance is created, the default is the latest engine version available.
", - "CreateReplicationInstanceMessage$KmsKeyId": "An AWS KMS key identifier that is used to encrypt the data on the replication instance.
If you don't specify a value for the KmsKeyId
parameter, then AWS DMS uses your default encryption key.
AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS Region.
", + "CreateReplicationInstanceMessage$KmsKeyId": "An KMS key identifier that is used to encrypt the data on the replication instance.
If you don't specify a value for the KmsKeyId
parameter, then DMS uses your default encryption key.
KMS creates the default encryption key for your account. Your account has a different default encryption key for each Region.
", "CreateReplicationInstanceMessage$DnsNameServers": "A list of custom DNS name servers supported for the replication instance to access your on-premise source or target database. This list overrides the default name servers supported by the replication instance. You can specify a comma-separated list of internet addresses for up to four on-premise DNS name servers. For example: \"1.1.1.1,2.2.2.2,3.3.3.3,4.4.4.4\"
A friendly name for the resource identifier at the end of the EndpointArn
response parameter that is returned in the created Endpoint
object. The value for this parameter can have up to 31 characters. It can contain only ASCII letters, digits, and hyphen ('-'). Also, it can't end with a hyphen or contain two consecutive hyphens, and can only begin with a letter, such as Example-App-ARN1
. For example, this value might result in the EndpointArn
value arn:aws:dms:eu-west-1:012345678901:rep:Example-App-ARN1
. If you don't specify a ResourceIdentifier
value, AWS DMS generates a default identifier value for the end of EndpointArn
.
A friendly name for the resource identifier at the end of the EndpointArn
response parameter that is returned in the created Endpoint
object. The value for this parameter can have up to 31 characters. It can contain only ASCII letters, digits, and hyphen ('-'). Also, it can't end with a hyphen or contain two consecutive hyphens, and can only begin with a letter, such as Example-App-ARN1
. For example, this value might result in the EndpointArn
value arn:aws:dms:eu-west-1:012345678901:rep:Example-App-ARN1
. If you don't specify a ResourceIdentifier
value, DMS generates a default identifier value for the end of EndpointArn
.
The name for the replication subnet group. This value is stored as a lowercase string.
Constraints: Must contain no more than 255 alphanumeric characters, periods, spaces, underscores, or hyphens. Must not be \"default\".
Example: mySubnetgroup
The description for the subnet group.
", "CreateReplicationTaskMessage$ReplicationTaskIdentifier": "An identifier for the replication task.
Constraints:
Must contain 1-255 alphanumeric characters or hyphens.
First character must be a letter.
Cannot end with a hyphen or contain two consecutive hyphens.
An Amazon Resource Name (ARN) that uniquely identifies the source endpoint.
", "CreateReplicationTaskMessage$TargetEndpointArn": "An Amazon Resource Name (ARN) that uniquely identifies the target endpoint.
", "CreateReplicationTaskMessage$ReplicationInstanceArn": "The Amazon Resource Name (ARN) of a replication instance.
", - "CreateReplicationTaskMessage$TableMappings": "The table mappings for the task, in JSON format. For more information, see Using Table Mapping to Specify Task Settings in the AWS Database Migration Service User Guide.
", - "CreateReplicationTaskMessage$ReplicationTaskSettings": "Overall settings for the task, in JSON format. For more information, see Specifying Task Settings for AWS Database Migration Service Tasks in the AWS Database Migration User Guide.
", - "CreateReplicationTaskMessage$CdcStartPosition": "Indicates when you want a change data capture (CDC) operation to start. Use either CdcStartPosition or CdcStartTime to specify when you want a CDC operation to start. Specifying both values results in an error.
The value can be in date, checkpoint, or LSN/SCN format.
Date Example: --cdc-start-position “2018-03-08T12:12:12”
Checkpoint Example: --cdc-start-position \"checkpoint:V1#27#mysql-bin-changelog.157832:1975:-1:2002:677883278264080:mysql-bin-changelog.157832:1876#0#0#*#0#93\"
LSN Example: --cdc-start-position “mysql-bin-changelog.000024:373”
When you use this task setting with a source PostgreSQL database, a logical replication slot should already be created and associated with the source endpoint. You can verify this by setting the slotName
extra connection attribute to the name of this logical replication slot. For more information, see Extra Connection Attributes When Using PostgreSQL as a Source for AWS DMS.
The table mappings for the task, in JSON format. For more information, see Using Table Mapping to Specify Task Settings in the Database Migration Service User Guide.
", + "CreateReplicationTaskMessage$ReplicationTaskSettings": "Overall settings for the task, in JSON format. For more information, see Specifying Task Settings for Database Migration Service Tasks in the Database Migration Service User Guide.
", + "CreateReplicationTaskMessage$CdcStartPosition": "Indicates when you want a change data capture (CDC) operation to start. Use either CdcStartPosition or CdcStartTime to specify when you want a CDC operation to start. Specifying both values results in an error.
The value can be in date, checkpoint, or LSN/SCN format.
Date Example: --cdc-start-position “2018-03-08T12:12:12”
Checkpoint Example: --cdc-start-position \"checkpoint:V1#27#mysql-bin-changelog.157832:1975:-1:2002:677883278264080:mysql-bin-changelog.157832:1876#0#0#*#0#93\"
LSN Example: --cdc-start-position “mysql-bin-changelog.000024:373”
When you use this task setting with a source PostgreSQL database, a logical replication slot should already be created and associated with the source endpoint. You can verify this by setting the slotName
extra connection attribute to the name of this logical replication slot. For more information, see Extra Connection Attributes When Using PostgreSQL as a Source for DMS.
Indicates when you want a change data capture (CDC) operation to stop. The value can be either server time or commit time.
Server time example: --cdc-stop-position “server_time:2018-02-09T12:12:12”
Commit time example: --cdc-stop-position “commit_time: 2018-02-09T12:12:12 “
", - "CreateReplicationTaskMessage$TaskData": "Supplemental information that the task requires to migrate the data for certain source and target endpoints. For more information, see Specifying Supplemental Data for Task Settings in the AWS Database Migration Service User Guide.
", - "CreateReplicationTaskMessage$ResourceIdentifier": "A friendly name for the resource identifier at the end of the EndpointArn
response parameter that is returned in the created Endpoint
object. The value for this parameter can have up to 31 characters. It can contain only ASCII letters, digits, and hyphen ('-'). Also, it can't end with a hyphen or contain two consecutive hyphens, and can only begin with a letter, such as Example-App-ARN1
. For example, this value might result in the EndpointArn
value arn:aws:dms:eu-west-1:012345678901:rep:Example-App-ARN1
. If you don't specify a ResourceIdentifier
value, AWS DMS generates a default identifier value for the end of EndpointArn
.
Supplemental information that the task requires to migrate the data for certain source and target endpoints. For more information, see Specifying Supplemental Data for Task Settings in the Database Migration Service User Guide.
", + "CreateReplicationTaskMessage$ResourceIdentifier": "A friendly name for the resource identifier at the end of the EndpointArn
response parameter that is returned in the created Endpoint
object. The value for this parameter can have up to 31 characters. It can contain only ASCII letters, digits, and hyphen ('-'). Also, it can't end with a hyphen or contain two consecutive hyphens, and can only begin with a letter, such as Example-App-ARN1
. For example, this value might result in the EndpointArn
value arn:aws:dms:eu-west-1:012345678901:rep:Example-App-ARN1
. If you don't specify a ResourceIdentifier
value, DMS generates a default identifier value for the end of EndpointArn
.
The Amazon Resource Name (ARN) of the deleted certificate.
", "DeleteConnectionMessage$EndpointArn": "The Amazon Resource Name (ARN) string that uniquely identifies the endpoint.
", "DeleteConnectionMessage$ReplicationInstanceArn": "The Amazon Resource Name (ARN) of the replication instance.
", @@ -1665,7 +1680,7 @@ "DeleteReplicationSubnetGroupMessage$ReplicationSubnetGroupIdentifier": "The subnet group name of the replication instance.
", "DeleteReplicationTaskAssessmentRunMessage$ReplicationTaskAssessmentRunArn": "Amazon Resource Name (ARN) of the premigration assessment run to be deleted.
", "DeleteReplicationTaskMessage$ReplicationTaskArn": "The Amazon Resource Name (ARN) of the replication task to be deleted.
", - "DescribeAccountAttributesResponse$UniqueAccountIdentifier": "A unique AWS DMS identifier for an account in a particular AWS Region. The value of this identifier has the following format: c99999999999
. DMS uses this identifier to name artifacts. For example, DMS uses this identifier to name the default Amazon S3 bucket for storing task assessment reports in a given AWS Region. The format of this S3 bucket name is the following: dms-AccountNumber-UniqueAccountIdentifier.
Here is an example name for this default S3 bucket: dms-111122223333-c44445555666
.
AWS DMS supports the UniqueAccountIdentifier
parameter in versions 3.1.4 and later.
A unique DMS identifier for an account in a particular Region. The value of this identifier has the following format: c99999999999
. DMS uses this identifier to name artifacts. For example, DMS uses this identifier to name the default Amazon S3 bucket for storing task assessment reports in a given Region. The format of this S3 bucket name is the following: dms-AccountNumber-UniqueAccountIdentifier.
Here is an example name for this default S3 bucket: dms-111122223333-c44445555666
.
DMS supports the UniqueAccountIdentifier
parameter in versions 3.1.4 and later.
Amazon Resource Name (ARN) of a migration task on which you want to base the default list of individual assessments.
", "DescribeApplicableIndividualAssessmentsMessage$ReplicationInstanceArn": "ARN of a replication instance on which you want to base the default list of individual assessments.
", "DescribeApplicableIndividualAssessmentsMessage$SourceEngineName": "Name of a database engine that the specified replication instance supports as a source.
", @@ -1683,8 +1698,8 @@ "DescribeEndpointTypesResponse$Marker": " An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords
.
An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords
.
An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords
.
The type of AWS DMS resource that generates events.
Valid values: replication-instance | replication-task
", - "DescribeEventSubscriptionsMessage$SubscriptionName": "The name of the AWS DMS event subscription to be described.
", + "DescribeEventCategoriesMessage$SourceType": "The type of DMS resource that generates events.
Valid values: replication-instance | replication-task
", + "DescribeEventSubscriptionsMessage$SubscriptionName": "The name of the DMS event subscription to be described.
", "DescribeEventSubscriptionsMessage$Marker": " An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords
.
An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords
.
The identifier of an event source.
", @@ -1721,17 +1736,17 @@ "DescribeTableStatisticsMessage$Marker": " An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords
.
The Amazon Resource Name (ARN) of the replication task.
", "DescribeTableStatisticsResponse$Marker": " An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords
.
The IAM role that has permission to access the Amazon S3 bucket.
", + "DmsTransferSettings$ServiceAccessRoleArn": " The IAM role that has permission to access the Amazon S3 bucket. When specified as part of request syntax, such as for the CreateEndpoint
and ModifyEndpoint
actions, the role must allow the iam:PassRole
action.
The name of the S3 bucket to use.
", "DocDbSettings$Username": "The user name you use to access the DocumentDB source endpoint.
", "DocDbSettings$ServerName": "The name of the server on the DocumentDB source endpoint.
", "DocDbSettings$DatabaseName": "The database name on the DocumentDB source endpoint.
", - "DocDbSettings$KmsKeyId": "The AWS KMS key identifier that is used to encrypt the content on the replication instance. If you don't specify a value for the KmsKeyId
parameter, then AWS DMS uses your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS Region.
The full Amazon Resource Name (ARN) of the IAM role that specifies AWS DMS as the trusted entity and grants the required permissions to access the value in SecretsManagerSecret
. SecretsManagerSecret
has the value of the AWS Secrets Manager secret that allows access to the DocumentDB endpoint.
You can specify one of two sets of values for these permissions. You can specify the values for this setting and SecretsManagerSecretId
. Or you can specify clear-text values for UserName
, Password
, ServerName
, and Port
. You can't specify both. For more information on creating this SecretsManagerSecret
and the SecretsManagerAccessRoleArn
and SecretsManagerSecretId
required to access it, see Using secrets to access AWS Database Migration Service resources in the AWS Database Migration Service User Guide.
The KMS key identifier that is used to encrypt the content on the replication instance. If you don't specify a value for the KmsKeyId
parameter, then DMS uses your default encryption key. KMS creates the default encryption key for your account. Your account has a different default encryption key for each Region.
The full Amazon Resource Name (ARN) of the IAM role that specifies DMS as the trusted entity and grants the required permissions to access the value in SecretsManagerSecret
. The role must allow the iam:PassRole
action. SecretsManagerSecret
has the value of the Amazon Web Services Secrets Manager secret that allows access to the DocumentDB endpoint.
You can specify one of two sets of values for these permissions. You can specify the values for this setting and SecretsManagerSecretId
. Or you can specify clear-text values for UserName
, Password
, ServerName
, and Port
. You can't specify both. For more information on creating this SecretsManagerSecret
and the SecretsManagerAccessRoleArn
and SecretsManagerSecretId
required to access it, see Using secrets to access Database Migration Service resources in the Database Migration Service User Guide.
The full ARN, partial ARN, or friendly name of the SecretsManagerSecret
that contains the DocumentDB endpoint connection details.
The Amazon Resource Name (ARN) used by the service access IAM role.
", - "ElasticsearchSettings$ServiceAccessRoleArn": "The Amazon Resource Name (ARN) used by service to access the IAM role.
", - "ElasticsearchSettings$EndpointUri": "The endpoint for the Elasticsearch cluster. AWS DMS uses HTTPS if a transport protocol (http/https) is not specified.
", + "DynamoDbSettings$ServiceAccessRoleArn": " The Amazon Resource Name (ARN) used by the service to access the IAM role. The role must allow the iam:PassRole
action.
The Amazon Resource Name (ARN) used by the service to access the IAM role. The role must allow the iam:PassRole
action.
The endpoint for the Elasticsearch cluster. DMS uses HTTPS if a transport protocol (http/https) is not specified.
", "Endpoint$EndpointIdentifier": "The database endpoint identifier. Identifiers must begin with a letter and must contain only ASCII letters, digits, and hyphens. They can't end with a hyphen or contain two consecutive hyphens.
", "Endpoint$EngineName": "The database engine name. Valid values, depending on the EndpointType, include \"mysql\"
, \"oracle\"
, \"postgres\"
, \"mariadb\"
, \"aurora\"
, \"aurora-postgresql\"
, \"redshift\"
, \"s3\"
, \"db2\"
, \"azuredb\"
, \"sybase\"
, \"dynamodb\"
, \"mongodb\"
, \"kinesis\"
, \"kafka\"
, \"elasticsearch\"
, \"documentdb\"
, \"sqlserver\"
, and \"neptune\"
.
The expanded name for the engine name. For example, if the EngineName
parameter is \"aurora,\" this value would be \"Amazon Aurora MySQL.\"
The name of the database at the endpoint.
", "Endpoint$ExtraConnectionAttributes": "Additional connection attributes used to connect to the endpoint.
", "Endpoint$Status": "The status of the endpoint.
", - "Endpoint$KmsKeyId": "An AWS KMS key identifier that is used to encrypt the connection parameters for the endpoint.
If you don't specify a value for the KmsKeyId
parameter, then AWS DMS uses your default encryption key.
AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS Region.
", + "Endpoint$KmsKeyId": "An KMS key identifier that is used to encrypt the connection parameters for the endpoint.
If you don't specify a value for the KmsKeyId
parameter, then DMS uses your default encryption key.
KMS creates the default encryption key for your account. Your account has a different default encryption key for each Region.
", "Endpoint$EndpointArn": "The Amazon Resource Name (ARN) string that uniquely identifies the endpoint.
", "Endpoint$CertificateArn": "The Amazon Resource Name (ARN) used for SSL connection to the endpoint.
", - "Endpoint$ServiceAccessRoleArn": "The Amazon Resource Name (ARN) used by the service access IAM role.
", + "Endpoint$ServiceAccessRoleArn": "The Amazon Resource Name (ARN) used by the service to access the IAM role. The role must allow the iam:PassRole
action.
The external table definition.
", "Endpoint$ExternalId": "Value returned by a call to CreateEndpoint that can be used for cross-account validation. Use it on a subsequent call to CreateEndpoint to create the endpoint with a cross-account.
", "EndpointSetting$Name": "The name that you want to give the endpoint settings.
", "EndpointSetting$Units": "The unit of measure for this endpoint setting.
", "EndpointSetting$Applicability": "The relevance or validity of an endpoint setting for an engine name and its endpoint type.
", + "EndpointSetting$DefaultValue": "The default value of the endpoint setting if no value is specified using CreateEndpoint
or ModifyEndpoint
.
The identifier of an event source.
", "Event$Message": "The event message.
", "EventCategoriesList$member": null, - "EventCategoryGroup$SourceType": "The type of AWS DMS resource that generates events.
Valid values: replication-instance | replication-server | security-group | replication-task
", - "EventSubscription$CustomerAwsId": "The AWS customer account associated with the AWS DMS event notification subscription.
", - "EventSubscription$CustSubscriptionId": "The AWS DMS event notification subscription Id.
", - "EventSubscription$SnsTopicArn": "The topic ARN of the AWS DMS event notification subscription.
", - "EventSubscription$Status": "The status of the AWS DMS event notification subscription.
Constraints:
Can be one of the following: creating | modifying | deleting | active | no-permission | topic-not-exist
The status \"no-permission\" indicates that AWS DMS no longer has permission to post to the SNS topic. The status \"topic-not-exist\" indicates that the topic was deleted after the subscription was created.
", - "EventSubscription$SubscriptionCreationTime": "The time the AWS DMS event notification subscription was created.
", - "EventSubscription$SourceType": "The type of AWS DMS resource that generates events.
Valid values: replication-instance | replication-server | security-group | replication-task
", + "EventCategoryGroup$SourceType": "The type of DMS resource that generates events.
Valid values: replication-instance | replication-server | security-group | replication-task
", + "EventSubscription$CustomerAwsId": "The Amazon Web Services customer account associated with the DMS event notification subscription.
", + "EventSubscription$CustSubscriptionId": "The DMS event notification subscription Id.
", + "EventSubscription$SnsTopicArn": "The topic ARN of the DMS event notification subscription.
", + "EventSubscription$Status": "The status of the DMS event notification subscription.
Constraints:
Can be one of the following: creating | modifying | deleting | active | no-permission | topic-not-exist
The status \"no-permission\" indicates that DMS no longer has permission to post to the SNS topic. The status \"topic-not-exist\" indicates that the topic was deleted after the subscription was created.
", + "EventSubscription$SubscriptionCreationTime": "The time the DMS event notification subscription was created.
", + "EventSubscription$SourceType": "The type of DMS resource that generates events.
Valid values: replication-instance | replication-server | security-group | replication-task
", "ExcludeTestList$member": null, "Filter$Name": "The name of the filter as specified for a Describe*
or similar operation.
Fully qualified domain name of the endpoint.
", "IBMDb2Settings$CurrentLsn": "For ongoing replication (CDC), use CurrentLSN to specify a log sequence number (LSN) where you want the replication to start.
", "IBMDb2Settings$Username": "Endpoint connection user name.
", - "IBMDb2Settings$SecretsManagerAccessRoleArn": "The full Amazon Resource Name (ARN) of the IAM role that specifies AWS DMS as the trusted entity and grants the required permissions to access the value in SecretsManagerSecret
. SecretsManagerSecret
has the value of the AWS Secrets Manager secret that allows access to the Db2 LUW endpoint.
You can specify one of two sets of values for these permissions. You can specify the values for this setting and SecretsManagerSecretId
. Or you can specify clear-text values for UserName
, Password
, ServerName
, and Port
. You can't specify both. For more information on creating this SecretsManagerSecret
and the SecretsManagerAccessRoleArn
and SecretsManagerSecretId
required to access it, see Using secrets to access AWS Database Migration Service resources in the AWS Database Migration Service User Guide.
The full Amazon Resource Name (ARN) of the IAM role that specifies DMS as the trusted entity and grants the required permissions to access the value in SecretsManagerSecret
. The role must allow the iam:PassRole
action. SecretsManagerSecret
has the value of the Amazon Web Services Secrets Manager secret that allows access to the Db2 LUW endpoint.
You can specify one of two sets of values for these permissions. You can specify the values for this setting and SecretsManagerSecretId
. Or you can specify clear-text values for UserName
, Password
, ServerName
, and Port
. You can't specify both. For more information on creating this SecretsManagerSecret
and the SecretsManagerAccessRoleArn
and SecretsManagerSecretId
required to access it, see Using secrets to access Database Migration Service resources in the Database Migration Service User Guide.
The full ARN, partial ARN, or friendly name of the SecretsManagerSecret
that contains the Db2 LUW endpoint connection details.
A customer-assigned name for the certificate. Identifiers must begin with a letter and must contain only ASCII letters, digits, and hyphens. They can't end with a hyphen or contain two consecutive hyphens.
", "IncludeTestList$member": null, "IndividualAssessmentNameList$member": null, - "KafkaSettings$Broker": "A comma-separated list of one or more broker locations in your Kafka cluster that host your Kafka instance. Specify each broker location in the form broker-hostname-or-ip:port
. For example, \"ec2-12-345-678-901.compute-1.amazonaws.com:2345\"
. For more information and examples of specifying a list of broker locations, see Using Apache Kafka as a target for AWS Database Migration Service in the AWS Data Migration Service User Guide.
The topic to which you migrate the data. If you don't specify a topic, AWS DMS specifies \"kafka-default-topic\"
as the migration topic.
A comma-separated list of one or more broker locations in your Kafka cluster that host your Kafka instance. Specify each broker location in the form broker-hostname-or-ip:port
. For example, \"ec2-12-345-678-901.compute-1.amazonaws.com:2345\"
. For more information and examples of specifying a list of broker locations, see Using Apache Kafka as a target for Database Migration Service in the Database Migration Service User Guide.
The topic to which you migrate the data. If you don't specify a topic, DMS specifies \"kafka-default-topic\"
as the migration topic.
The Amazon Resource Name (ARN) of the client certificate used to securely connect to a Kafka target endpoint.
", "KafkaSettings$SslClientKeyArn": "The Amazon Resource Name (ARN) for the client private key used to securely connect to a Kafka target endpoint.
", - "KafkaSettings$SslCaCertificateArn": "The Amazon Resource Name (ARN) for the private Certification Authority (CA) cert that AWS DMS uses to securely connect to your Kafka target endpoint.
", - "KafkaSettings$SaslUsername": "The secure username you created when you first set up your MSK cluster to validate a client identity and make an encrypted connection between server and client using SASL-SSL authentication.
", + "KafkaSettings$SslCaCertificateArn": "The Amazon Resource Name (ARN) for the private Certification Authority (CA) cert that DMS uses to securely connect to your Kafka target endpoint.
", + "KafkaSettings$SaslUsername": "The secure user name you created when you first set up your MSK cluster to validate a client identity and make an encrypted connection between server and client using SASL-SSL authentication.
", "KeyList$member": null, "KinesisSettings$StreamArn": "The Amazon Resource Name (ARN) for the Amazon Kinesis Data Streams endpoint.
", - "KinesisSettings$ServiceAccessRoleArn": "The Amazon Resource Name (ARN) for the AWS Identity and Access Management (IAM) role that AWS DMS uses to write to the Kinesis data stream.
", - "ListTagsForResourceMessage$ResourceArn": "The Amazon Resource Name (ARN) string that uniquely identifies the AWS DMS resource.
", + "KinesisSettings$ServiceAccessRoleArn": "The Amazon Resource Name (ARN) for the IAM role that DMS uses to write to the Kinesis data stream. The role must allow the iam:PassRole
action.
The Amazon Resource Name (ARN) string that uniquely identifies the DMS resource.
", "MicrosoftSQLServerSettings$DatabaseName": "Database name for the endpoint.
", - "MicrosoftSQLServerSettings$ControlTablesFileGroup": "Specifies a file group for the AWS DMS internal tables. When the replication task starts, all the internal AWS DMS control tables (awsdms_ apply_exception, awsdms_apply, awsdms_changes) are created for the specified file group.
", + "MicrosoftSQLServerSettings$ControlTablesFileGroup": "Specifies a file group for the DMS internal tables. When the replication task starts, all the internal DMS control tables (awsdms_ apply_exception, awsdms_apply, awsdms_changes) are created for the specified file group.
", "MicrosoftSQLServerSettings$ServerName": "Fully qualified domain name of the endpoint.
", "MicrosoftSQLServerSettings$Username": "Endpoint connection user name.
", - "MicrosoftSQLServerSettings$SecretsManagerAccessRoleArn": "The full Amazon Resource Name (ARN) of the IAM role that specifies AWS DMS as the trusted entity and grants the required permissions to access the value in SecretsManagerSecret
. SecretsManagerSecret
has the value of the AWS Secrets Manager secret that allows access to the SQL Server endpoint.
You can specify one of two sets of values for these permissions. You can specify the values for this setting and SecretsManagerSecretId
. Or you can specify clear-text values for UserName
, Password
, ServerName
, and Port
. You can't specify both. For more information on creating this SecretsManagerSecret
and the SecretsManagerAccessRoleArn
and SecretsManagerSecretId
required to access it, see Using secrets to access AWS Database Migration Service resources in the AWS Database Migration Service User Guide.
The full Amazon Resource Name (ARN) of the IAM role that specifies DMS as the trusted entity and grants the required permissions to access the value in SecretsManagerSecret
. The role must allow the iam:PassRole
action. SecretsManagerSecret
has the value of the Amazon Web Services Secrets Manager secret that allows access to the SQL Server endpoint.
You can specify one of two sets of values for these permissions. You can specify the values for this setting and SecretsManagerSecretId
. Or you can specify clear-text values for UserName
, Password
, ServerName
, and Port
. You can't specify both. For more information on creating this SecretsManagerSecret
and the SecretsManagerAccessRoleArn
and SecretsManagerSecretId
required to access it, see Using secrets to access Database Migration Service resources in the Database Migration Service User Guide.
The full ARN, partial ARN, or friendly name of the SecretsManagerSecret
that contains the SQL Server endpoint connection details.
The Amazon Resource Name (ARN) string that uniquely identifies the endpoint.
", "ModifyEndpointMessage$EndpointIdentifier": "The database endpoint identifier. Identifiers must begin with a letter and must contain only ASCII letters, digits, and hyphens. They can't end with a hyphen or contain two consecutive hyphens.
", "ModifyEndpointMessage$EngineName": "The type of engine for the endpoint. Valid values, depending on the EndpointType, include \"mysql\"
, \"oracle\"
, \"postgres\"
, \"mariadb\"
, \"aurora\"
, \"aurora-postgresql\"
, \"redshift\"
, \"s3\"
, \"db2\"
, \"azuredb\"
, \"sybase\"
, \"dynamodb\"
, \"mongodb\"
, \"kinesis\"
, \"kafka\"
, \"elasticsearch\"
, \"documentdb\"
, \"sqlserver\"
, and \"neptune\"
.
The user name to be used to login to the endpoint database.
", "ModifyEndpointMessage$ServerName": "The name of the server where the endpoint database resides.
", - "ModifyEndpointMessage$DatabaseName": "The name of the endpoint database.
", + "ModifyEndpointMessage$DatabaseName": "The name of the endpoint database. For a MySQL source or target endpoint, do not specify DatabaseName.
", "ModifyEndpointMessage$ExtraConnectionAttributes": "Additional attributes associated with the connection. To reset this parameter, pass the empty string (\"\") as an argument.
", "ModifyEndpointMessage$CertificateArn": "The Amazon Resource Name (ARN) of the certificate used for SSL connection.
", - "ModifyEndpointMessage$ServiceAccessRoleArn": "The Amazon Resource Name (ARN) for the service access role you want to use to modify the endpoint.
", + "ModifyEndpointMessage$ServiceAccessRoleArn": " The Amazon Resource Name (ARN) for the IAM role you want to use to modify the endpoint. The role must allow the iam:PassRole
action.
The external table definition.
", - "ModifyEventSubscriptionMessage$SubscriptionName": "The name of the AWS DMS event notification subscription to be modified.
", + "ModifyEventSubscriptionMessage$SubscriptionName": "The name of the DMS event notification subscription to be modified.
", "ModifyEventSubscriptionMessage$SnsTopicArn": "The Amazon Resource Name (ARN) of the Amazon SNS topic created for event notification. The ARN is created by Amazon SNS when you create a topic and subscribe to it.
", - "ModifyEventSubscriptionMessage$SourceType": "The type of AWS DMS resource that generates the events you want to subscribe to.
Valid values: replication-instance | replication-task
", + "ModifyEventSubscriptionMessage$SourceType": "The type of DMS resource that generates the events you want to subscribe to.
Valid values: replication-instance | replication-task
", "ModifyReplicationInstanceMessage$ReplicationInstanceArn": "The Amazon Resource Name (ARN) of the replication instance.
", - "ModifyReplicationInstanceMessage$ReplicationInstanceClass": "The compute and memory capacity of the replication instance as defined for the specified replication instance class. For example to specify the instance class dms.c4.large, set this parameter to \"dms.c4.large\"
.
For more information on the settings and capacities for the available replication instance classes, see Selecting the right AWS DMS replication instance for your migration.
", + "ModifyReplicationInstanceMessage$ReplicationInstanceClass": "The compute and memory capacity of the replication instance as defined for the specified replication instance class. For example to specify the instance class dms.c4.large, set this parameter to \"dms.c4.large\"
.
For more information on the settings and capacities for the available replication instance classes, see Selecting the right DMS replication instance for your migration.
", "ModifyReplicationInstanceMessage$PreferredMaintenanceWindow": "The weekly time range (in UTC) during which system maintenance can occur, which might result in an outage. Changing this parameter does not result in an outage, except in the following situation, and the change is asynchronously applied as soon as possible. If moving this window to the current time, there must be at least 30 minutes between the current time and end of the window to ensure pending changes are applied.
Default: Uses existing setting
Format: ddd:hh24:mi-ddd:hh24:mi
Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun
Constraints: Must be at least 30 minutes
", "ModifyReplicationInstanceMessage$EngineVersion": "The engine version number of the replication instance.
When modifying a major engine version of an instance, also set AllowMajorVersionUpgrade
to true
.
The replication instance identifier. This parameter is stored as a lowercase string.
", @@ -1810,73 +1826,74 @@ "ModifyReplicationSubnetGroupMessage$ReplicationSubnetGroupDescription": "A description for the replication instance subnet group.
", "ModifyReplicationTaskMessage$ReplicationTaskArn": "The Amazon Resource Name (ARN) of the replication task.
", "ModifyReplicationTaskMessage$ReplicationTaskIdentifier": "The replication task identifier.
Constraints:
Must contain 1-255 alphanumeric characters or hyphens.
First character must be a letter.
Cannot end with a hyphen or contain two consecutive hyphens.
When using the AWS CLI or boto3, provide the path of the JSON file that contains the table mappings. Precede the path with file://
. For example, --table-mappings file://mappingfile.json
. When working with the DMS API, provide the JSON as the parameter value.
When using the CLI or boto3, provide the path of the JSON file that contains the table mappings. Precede the path with file://
. For example, --table-mappings file://mappingfile.json
. When working with the DMS API, provide the JSON as the parameter value.
JSON file that contains settings for the task, such as task metadata settings.
", - "ModifyReplicationTaskMessage$CdcStartPosition": "Indicates when you want a change data capture (CDC) operation to start. Use either CdcStartPosition or CdcStartTime to specify when you want a CDC operation to start. Specifying both values results in an error.
The value can be in date, checkpoint, or LSN/SCN format.
Date Example: --cdc-start-position “2018-03-08T12:12:12”
Checkpoint Example: --cdc-start-position \"checkpoint:V1#27#mysql-bin-changelog.157832:1975:-1:2002:677883278264080:mysql-bin-changelog.157832:1876#0#0#*#0#93\"
LSN Example: --cdc-start-position “mysql-bin-changelog.000024:373”
When you use this task setting with a source PostgreSQL database, a logical replication slot should already be created and associated with the source endpoint. You can verify this by setting the slotName
extra connection attribute to the name of this logical replication slot. For more information, see Extra Connection Attributes When Using PostgreSQL as a Source for AWS DMS.
Indicates when you want a change data capture (CDC) operation to start. Use either CdcStartPosition or CdcStartTime to specify when you want a CDC operation to start. Specifying both values results in an error.
The value can be in date, checkpoint, or LSN/SCN format.
Date Example: --cdc-start-position “2018-03-08T12:12:12”
Checkpoint Example: --cdc-start-position \"checkpoint:V1#27#mysql-bin-changelog.157832:1975:-1:2002:677883278264080:mysql-bin-changelog.157832:1876#0#0#*#0#93\"
LSN Example: --cdc-start-position “mysql-bin-changelog.000024:373”
When you use this task setting with a source PostgreSQL database, a logical replication slot should already be created and associated with the source endpoint. You can verify this by setting the slotName
extra connection attribute to the name of this logical replication slot. For more information, see Extra Connection Attributes When Using PostgreSQL as a Source for DMS.
Indicates when you want a change data capture (CDC) operation to stop. The value can be either server time or commit time.
Server time example: --cdc-stop-position “server_time:2018-02-09T12:12:12”
Commit time example: --cdc-stop-position “commit_time: 2018-02-09T12:12:12 “
", - "ModifyReplicationTaskMessage$TaskData": "Supplemental information that the task requires to migrate the data for certain source and target endpoints. For more information, see Specifying Supplemental Data for Task Settings in the AWS Database Migration Service User Guide.
", + "ModifyReplicationTaskMessage$TaskData": "Supplemental information that the task requires to migrate the data for certain source and target endpoints. For more information, see Specifying Supplemental Data for Task Settings in the Database Migration Service User Guide.
", "MongoDbSettings$Username": "The user name you use to access the MongoDB source endpoint.
", "MongoDbSettings$ServerName": "The name of the server on the MongoDB source endpoint.
", "MongoDbSettings$DatabaseName": "The database name on the MongoDB source endpoint.
", "MongoDbSettings$ExtractDocId": " Specifies the document ID. Use this setting when NestingLevel
is set to \"none\"
.
Default value is \"false\"
.
Indicates the number of documents to preview to determine the document organization. Use this setting when NestingLevel
is set to \"one\"
.
Must be a positive value greater than 0
. Default value is 1000
.
The MongoDB database name. This setting isn't used when AuthType
is set to \"no\"
.
The default is \"admin\"
.
The AWS KMS key identifier that is used to encrypt the content on the replication instance. If you don't specify a value for the KmsKeyId
parameter, then AWS DMS uses your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS Region.
The full Amazon Resource Name (ARN) of the IAM role that specifies AWS DMS as the trusted entity and grants the required permissions to access the value in SecretsManagerSecret
. SecretsManagerSecret
has the value of the AWS Secrets Manager secret that allows access to the MongoDB endpoint.
You can specify one of two sets of values for these permissions. You can specify the values for this setting and SecretsManagerSecretId
. Or you can specify clear-text values for UserName
, Password
, ServerName
, and Port
. You can't specify both. For more information on creating this SecretsManagerSecret
and the SecretsManagerAccessRoleArn
and SecretsManagerSecretId
required to access it, see Using secrets to access AWS Database Migration Service resources in the AWS Database Migration Service User Guide.
The KMS key identifier that is used to encrypt the content on the replication instance. If you don't specify a value for the KmsKeyId
parameter, then DMS uses your default encryption key. KMS creates the default encryption key for your account. Your account has a different default encryption key for each Region.
The full Amazon Resource Name (ARN) of the IAM role that specifies DMS as the trusted entity and grants the required permissions to access the value in SecretsManagerSecret
. The role must allow the iam:PassRole
action. SecretsManagerSecret
has the value of the Amazon Web Services Secrets Manager secret that allows access to the MongoDB endpoint.
You can specify one of two sets of values for these permissions. You can specify the values for this setting and SecretsManagerSecretId
. Or you can specify clear-text values for UserName
, Password
, ServerName
, and Port
. You can't specify both. For more information on creating this SecretsManagerSecret
and the SecretsManagerAccessRoleArn
and SecretsManagerSecretId
required to access it, see Using secrets to access Database Migration Service resources in the Database Migration Service User Guide.
The full ARN, partial ARN, or friendly name of the SecretsManagerSecret
that contains the MongoDB endpoint connection details.
The Amazon Resource Name (ARN) of the task that you want to move.
", "MoveReplicationTaskMessage$TargetReplicationInstanceArn": "The ARN of the replication instance where you want to move the task to.
", - "MySQLSettings$AfterConnectScript": "Specifies a script to run immediately after AWS DMS connects to the endpoint. The migration task continues running regardless if the SQL statement succeeds or fails.
", - "MySQLSettings$DatabaseName": "Database name for the endpoint.
", + "MySQLSettings$AfterConnectScript": "Specifies a script to run immediately after DMS connects to the endpoint. The migration task continues running regardless if the SQL statement succeeds or fails.
For this parameter, provide the code of the script itself, not the name of a file containing the script.
", + "MySQLSettings$DatabaseName": "Database name for the endpoint. For a MySQL source or target endpoint, don't explicitly specify the database using the DatabaseName
request parameter on either the CreateEndpoint
or ModifyEndpoint
API call. Specifying DatabaseName
when you create or modify a MySQL endpoint replicates all the task tables to this single database. For MySQL endpoints, you specify the database only when you specify the schema in the table-mapping rules of the DMS task.
Fully qualified domain name of the endpoint.
", "MySQLSettings$ServerTimezone": "Specifies the time zone for the source MySQL database.
Example: serverTimezone=US/Pacific;
Note: Do not enclose time zones in single quotes.
", "MySQLSettings$Username": "Endpoint connection user name.
", - "MySQLSettings$SecretsManagerAccessRoleArn": "The full Amazon Resource Name (ARN) of the IAM role that specifies AWS DMS as the trusted entity and grants the required permissions to access the value in SecretsManagerSecret
. SecretsManagerSecret
has the value of the AWS Secrets Manager secret that allows access to the MySQL endpoint.
You can specify one of two sets of values for these permissions. You can specify the values for this setting and SecretsManagerSecretId
. Or you can specify clear-text values for UserName
, Password
, ServerName
, and Port
. You can't specify both. For more information on creating this SecretsManagerSecret
and the SecretsManagerAccessRoleArn
and SecretsManagerSecretId
required to access it, see Using secrets to access AWS Database Migration Service resources in the AWS Database Migration Service User Guide.
The full Amazon Resource Name (ARN) of the IAM role that specifies DMS as the trusted entity and grants the required permissions to access the value in SecretsManagerSecret
. The role must allow the iam:PassRole
action. SecretsManagerSecret
has the value of the Amazon Web Services Secrets Manager secret that allows access to the MySQL endpoint.
You can specify one of two sets of values for these permissions. You can specify the values for this setting and SecretsManagerSecretId
. Or you can specify clear-text values for UserName
, Password
, ServerName
, and Port
. You can't specify both. For more information on creating this SecretsManagerSecret
and the SecretsManagerAccessRoleArn
and SecretsManagerSecretId
required to access it, see Using secrets to access Database Migration Service resources in the Database Migration Service User Guide.
The full ARN, partial ARN, or friendly name of the SecretsManagerSecret
that contains the MySQL endpoint connection details.
The Amazon Resource Name (ARN) of the service role that you created for the Neptune target endpoint. For more information, see Creating an IAM Service Role for Accessing Amazon Neptune as a Target in the AWS Database Migration Service User Guide.
", - "NeptuneSettings$S3BucketName": "The name of the Amazon S3 bucket where AWS DMS can temporarily store migrated graph data in .csv files before bulk-loading it to the Neptune target database. AWS DMS maps the SQL source data to graph data before storing it in these .csv files.
", - "NeptuneSettings$S3BucketFolder": "A folder path where you want AWS DMS to store migrated graph data in the S3 bucket specified by S3BucketName
The Amazon Resource Name (ARN) of the service role that you created for the Neptune target endpoint. The role must allow the iam:PassRole
action. For more information, see Creating an IAM Service Role for Accessing Amazon Neptune as a Target in the Database Migration Service User Guide.
The name of the Amazon S3 bucket where DMS can temporarily store migrated graph data in .csv files before bulk-loading it to the Neptune target database. DMS maps the SQL source data to graph data before storing it in these .csv files.
", + "NeptuneSettings$S3BucketFolder": "A folder path where you want DMS to store migrated graph data in the S3 bucket specified by S3BucketName
Set this string attribute to the required value in order to use the Binary Reader to capture change data for an Amazon RDS for Oracle as the source. This value specifies the default Oracle root used to access the redo logs.
", "OracleSettings$UsePathPrefix": "Set this string attribute to the required value in order to use the Binary Reader to capture change data for an Amazon RDS for Oracle as the source. This value specifies the path prefix used to replace the default Oracle root to access the redo logs.
", "OracleSettings$AsmServer": "For an Oracle source endpoint, your ASM server address. You can set this value from the asm_server
value. You set asm_server
as part of the extra connection attribute string to access an Oracle server with Binary Reader that uses ASM. For more information, see Configuration for change data capture (CDC) on an Oracle source database.
For an Oracle source endpoint, your ASM user name. You can set this value from the asm_user
value. You set asm_user
as part of the extra connection attribute string to access an Oracle server with Binary Reader that uses ASM. For more information, see Configuration for change data capture (CDC) on an Oracle source database.
Database name for the endpoint.
", - "OracleSettings$SecurityDbEncryptionName": "For an Oracle source endpoint, the name of a key used for the transparent data encryption (TDE) of the columns and tablespaces in an Oracle source database that is encrypted using TDE. The key value is the value of the SecurityDbEncryption
setting. For more information on setting the key name value of SecurityDbEncryptionName
, see the information and example for setting the securityDbEncryptionName
extra connection attribute in Supported encryption methods for using Oracle as a source for AWS DMS in the AWS Database Migration Service User Guide.
For an Oracle source endpoint, the name of a key used for the transparent data encryption (TDE) of the columns and tablespaces in an Oracle source database that is encrypted using TDE. The key value is the value of the SecurityDbEncryption
setting. For more information on setting the key name value of SecurityDbEncryptionName
, see the information and example for setting the securityDbEncryptionName
extra connection attribute in Supported encryption methods for using Oracle as a source for DMS in the Database Migration Service User Guide.
Fully qualified domain name of the endpoint.
", "OracleSettings$SpatialDataOptionToGeoJsonFunctionName": "Use this attribute to convert SDO_GEOMETRY
to GEOJSON
format. By default, DMS calls the SDO2GEOJSON
custom function if present and accessible. Or you can create your own custom function that mimics the operation of SDOGEOJSON
and set SpatialDataOptionToGeoJsonFunctionName
to call it instead.
Endpoint connection user name.
", - "OracleSettings$SecretsManagerAccessRoleArn": "The full Amazon Resource Name (ARN) of the IAM role that specifies AWS DMS as the trusted entity and grants the required permissions to access the value in SecretsManagerSecret
. SecretsManagerSecret
has the value of the AWS Secrets Manager secret that allows access to the Oracle endpoint.
You can specify one of two sets of values for these permissions. You can specify the values for this setting and SecretsManagerSecretId
. Or you can specify clear-text values for UserName
, Password
, ServerName
, and Port
. You can't specify both. For more information on creating this SecretsManagerSecret
and the SecretsManagerAccessRoleArn
and SecretsManagerSecretId
required to access it, see Using secrets to access AWS Database Migration Service resources in the AWS Database Migration Service User Guide.
The full Amazon Resource Name (ARN) of the IAM role that specifies DMS as the trusted entity and grants the required permissions to access the value in SecretsManagerSecret
. The role must allow the iam:PassRole
action. SecretsManagerSecret
has the value of the Amazon Web Services Secrets Manager secret that allows access to the Oracle endpoint.
You can specify one of two sets of values for these permissions. You can specify the values for this setting and SecretsManagerSecretId
. Or you can specify clear-text values for UserName
, Password
, ServerName
, and Port
. You can't specify both. For more information on creating this SecretsManagerSecret
and the SecretsManagerAccessRoleArn
and SecretsManagerSecretId
required to access it, see Using secrets to access Database Migration Service resources in the Database Migration Service User Guide.
The full ARN, partial ARN, or friendly name of the SecretsManagerSecret
that contains the Oracle endpoint connection details.
Required only if your Oracle endpoint uses Advanced Storage Manager (ASM). The full ARN of the IAM role that specifies AWS DMS as the trusted entity and grants the required permissions to access the SecretsManagerOracleAsmSecret
. This SecretsManagerOracleAsmSecret
has the secret value that allows access to the Oracle ASM of the endpoint.
You can specify one of two sets of values for these permissions. You can specify the values for this setting and SecretsManagerOracleAsmSecretId
. Or you can specify clear-text values for AsmUserName
, AsmPassword
, and AsmServerName
. You can't specify both. For more information on creating this SecretsManagerOracleAsmSecret
and the SecretsManagerOracleAsmAccessRoleArn
and SecretsManagerOracleAsmSecretId
required to access it, see Using secrets to access AWS Database Migration Service resources in the AWS Database Migration Service User Guide.
Required only if your Oracle endpoint uses Advanced Storage Manager (ASM). The full ARN of the IAM role that specifies DMS as the trusted entity and grants the required permissions to access the SecretsManagerOracleAsmSecret
. This SecretsManagerOracleAsmSecret
has the secret value that allows access to the Oracle ASM of the endpoint.
You can specify one of two sets of values for these permissions. You can specify the values for this setting and SecretsManagerOracleAsmSecretId
. Or you can specify clear-text values for AsmUserName
, AsmPassword
, and AsmServerName
. You can't specify both. For more information on creating this SecretsManagerOracleAsmSecret
and the SecretsManagerOracleAsmAccessRoleArn
and SecretsManagerOracleAsmSecretId
required to access it, see Using secrets to access Database Migration Service resources in the Database Migration Service User Guide.
Required only if your Oracle endpoint uses Advanced Storage Manager (ASM). The full ARN, partial ARN, or friendly name of the SecretsManagerOracleAsmSecret
that contains the Oracle ASM connection details for the Oracle endpoint.
The version of the replication engine.
", - "OrderableReplicationInstance$ReplicationInstanceClass": "The compute and memory capacity of the replication instance as defined for the specified replication instance class. For example to specify the instance class dms.c4.large, set this parameter to \"dms.c4.large\"
.
For more information on the settings and capacities for the available replication instance classes, see Selecting the right AWS DMS replication instance for your migration.
", + "OrderableReplicationInstance$ReplicationInstanceClass": "The compute and memory capacity of the replication instance as defined for the specified replication instance class. For example to specify the instance class dms.c4.large, set this parameter to \"dms.c4.large\"
.
For more information on the settings and capacities for the available replication instance classes, see Selecting the right DMS replication instance for your migration.
", "OrderableReplicationInstance$StorageType": "The type of storage used by the replication instance.
", "PendingMaintenanceAction$Action": "The type of pending maintenance action that is available for the resource.
", "PendingMaintenanceAction$OptInStatus": "The type of opt-in request that has been received for the resource.
", "PendingMaintenanceAction$Description": "A description providing more detail about the maintenance action.
", - "PostgreSQLSettings$AfterConnectScript": "For use with change data capture (CDC) only, this attribute has AWS DMS bypass foreign keys and user triggers to reduce the time it takes to bulk load data.
Example: afterConnectScript=SET session_replication_role='replica'
For use with change data capture (CDC) only, this attribute has DMS bypass foreign keys and user triggers to reduce the time it takes to bulk load data.
Example: afterConnectScript=SET session_replication_role='replica'
Database name for the endpoint.
", "PostgreSQLSettings$DdlArtifactsSchema": "The schema in which the operational DDL database artifacts are created.
Example: ddlArtifactsSchema=xyzddlschema;
Sets the schema in which the heartbeat artifacts are created.
", "PostgreSQLSettings$ServerName": "Fully qualified domain name of the endpoint.
", "PostgreSQLSettings$Username": "Endpoint connection user name.
", - "PostgreSQLSettings$SlotName": "Sets the name of a previously created logical replication slot for a CDC load of the PostgreSQL source instance.
When used with the AWS DMS API CdcStartPosition
request parameter, this attribute also enables using native CDC start points.
The full Amazon Resource Name (ARN) of the IAM role that specifies AWS DMS as the trusted entity and grants the required permissions to access the value in SecretsManagerSecret
. SecretsManagerSecret
has the value of the AWS Secrets Manager secret that allows access to the PostgreSQL endpoint.
You can specify one of two sets of values for these permissions. You can specify the values for this setting and SecretsManagerSecretId
. Or you can specify clear-text values for UserName
, Password
, ServerName
, and Port
. You can't specify both. For more information on creating this SecretsManagerSecret
and the SecretsManagerAccessRoleArn
and SecretsManagerSecretId
required to access it, see Using secrets to access AWS Database Migration Service resources in the AWS Database Migration Service User Guide.
Sets the name of a previously created logical replication slot for a CDC load of the PostgreSQL source instance.
When used with the DMS API CdcStartPosition
request parameter, this attribute also enables using native CDC start points.
The full Amazon Resource Name (ARN) of the IAM role that specifies DMS as the trusted entity and grants the required permissions to access the value in SecretsManagerSecret
. The role must allow the iam:PassRole
action. SecretsManagerSecret
has the value of the Amazon Web Services Secrets Manager secret that allows access to the PostgreSQL endpoint.
You can specify one of two sets of values for these permissions. You can specify the values for this setting and SecretsManagerSecretId
. Or you can specify clear-text values for UserName
, Password
, ServerName
, and Port
. You can't specify both. For more information on creating this SecretsManagerSecret
and the SecretsManagerAccessRoleArn
and SecretsManagerSecretId
required to access it, see Using secrets to access Database Migration Service resources in the Database Migration Service User Guide.
The full ARN, partial ARN, or friendly name of the SecretsManagerSecret
that contains the PostgreSQL endpoint connection details.
The Amazon Resource Name (ARN) of the replication instance.
", "RedshiftSettings$AfterConnectScript": "Code to run after connecting. This parameter should contain the code itself, not the name of a file containing the code.
", - "RedshiftSettings$BucketFolder": "An S3 folder where the comma-separated-value (.csv) files are stored before being uploaded to the target Redshift cluster.
For full load mode, AWS DMS converts source records into .csv files and loads them to the BucketFolder/TableID path. AWS DMS uses the Redshift COPY
command to upload the .csv files to the target table. The files are deleted once the COPY
operation has finished. For more information, see COPY in the Amazon Redshift Database Developer Guide.
For change-data-capture (CDC) mode, AWS DMS creates a NetChanges table, and loads the .csv files to this BucketFolder/NetChangesTableID path.
", + "RedshiftSettings$BucketFolder": "An S3 folder where the comma-separated-value (.csv) files are stored before being uploaded to the target Redshift cluster.
For full load mode, DMS converts source records into .csv files and loads them to the BucketFolder/TableID path. DMS uses the Redshift COPY
command to upload the .csv files to the target table. The files are deleted once the COPY
operation has finished. For more information, see COPY in the Amazon Redshift Database Developer Guide.
For change-data-capture (CDC) mode, DMS creates a NetChanges table, and loads the .csv files to this BucketFolder/NetChangesTableID path.
", "RedshiftSettings$BucketName": "The name of the intermediate S3 bucket used to store .csv files before uploading data to Redshift.
", "RedshiftSettings$DatabaseName": "The name of the Amazon Redshift data warehouse (service) that you are working with.
", "RedshiftSettings$DateFormat": "The date format that you are using. Valid values are auto
(case-sensitive), your date format string enclosed in quotes, or NULL. If this parameter is left unset (NULL), it defaults to a format of 'YYYY-MM-DD'. Using auto
recognizes most strings, even some that aren't supported when you use a date format string.
If your date and time values use formats different from each other, set this to auto
.
A list of characters that you want to replace. Use with ReplaceChars
.
A value that specifies to replace the invalid characters specified in ReplaceInvalidChars
, substituting the specified characters instead. The default is \"?\"
.
The name of the Amazon Redshift cluster you are using.
", - "RedshiftSettings$ServiceAccessRoleArn": "The Amazon Resource Name (ARN) of the IAM role that has access to the Amazon Redshift service.
", - "RedshiftSettings$ServerSideEncryptionKmsKeyId": "The AWS KMS key ID. If you are using SSE_KMS
for the EncryptionMode
, provide this key ID. The key that you use needs an attached policy that enables IAM user permissions and allows use of the key.
The Amazon Resource Name (ARN) of the IAM role that has access to the Amazon Redshift service. The role must allow the iam:PassRole
action.
The KMS key ID. If you are using SSE_KMS
for the EncryptionMode
, provide this key ID. The key that you use needs an attached policy that enables IAM user permissions and allows use of the key.
The time format that you want to use. Valid values are auto
(case-sensitive), 'timeformat_string'
, 'epochsecs'
, or 'epochmillisecs'
. It defaults to 10. Using auto
recognizes most strings, even some that aren't supported when you use a time format string.
If your date and time values use formats different from each other, set this parameter to auto
.
An Amazon Redshift user name for a registered user.
", - "RedshiftSettings$SecretsManagerAccessRoleArn": "The full Amazon Resource Name (ARN) of the IAM role that specifies AWS DMS as the trusted entity and grants the required permissions to access the value in SecretsManagerSecret
. SecretsManagerSecret
has the value of the AWS Secrets Manager secret that allows access to the Amazon Redshift endpoint.
You can specify one of two sets of values for these permissions. You can specify the values for this setting and SecretsManagerSecretId
. Or you can specify clear-text values for UserName
, Password
, ServerName
, and Port
. You can't specify both. For more information on creating this SecretsManagerSecret
and the SecretsManagerAccessRoleArn
and SecretsManagerSecretId
required to access it, see Using secrets to access AWS Database Migration Service resources in the AWS Database Migration Service User Guide.
The full Amazon Resource Name (ARN) of the IAM role that specifies DMS as the trusted entity and grants the required permissions to access the value in SecretsManagerSecret
. The role must allow the iam:PassRole
action. SecretsManagerSecret
has the value of the Amazon Web Services Secrets Manager secret that allows access to the Amazon Redshift endpoint.
You can specify one of two sets of values for these permissions. You can specify the values for this setting and SecretsManagerSecretId
. Or you can specify clear-text values for UserName
, Password
, ServerName
, and Port
. You can't specify both. For more information on creating this SecretsManagerSecret
and the SecretsManagerAccessRoleArn
and SecretsManagerSecretId
required to access it, see Using secrets to access Database Migration Service resources in the Database Migration Service User Guide.
The full ARN, partial ARN, or friendly name of the SecretsManagerSecret
that contains the Amazon Redshift endpoint connection details.
The Amazon Resource Name (ARN) string that uniquely identifies the endpoint.
", "RefreshSchemasMessage$ReplicationInstanceArn": "The Amazon Resource Name (ARN) of the replication instance.
", @@ -1885,14 +1902,14 @@ "RefreshSchemasStatus$LastFailureMessage": "The last failure message for the schema.
", "ReloadTablesMessage$ReplicationTaskArn": "The Amazon Resource Name (ARN) of the replication task.
", "ReloadTablesResponse$ReplicationTaskArn": "The Amazon Resource Name (ARN) of the replication task.
", - "RemoveTagsFromResourceMessage$ResourceArn": "An AWS DMS resource from which you want to remove tag(s). The value for this parameter is an Amazon Resource Name (ARN).
", + "RemoveTagsFromResourceMessage$ResourceArn": "An DMS resource from which you want to remove tag(s). The value for this parameter is an Amazon Resource Name (ARN).
", "ReplicationInstance$ReplicationInstanceIdentifier": "The replication instance identifier is a required parameter. This parameter is stored as a lowercase string.
Constraints:
Must contain 1-63 alphanumeric characters or hyphens.
First character must be a letter.
Cannot end with a hyphen or contain two consecutive hyphens.
Example: myrepinstance
The compute and memory capacity of the replication instance as defined for the specified replication instance class. It is a required parameter, although a defualt value is pre-selected in the DMS console.
For more information on the settings and capacities for the available replication instance classes, see Selecting the right AWS DMS replication instance for your migration.
", + "ReplicationInstance$ReplicationInstanceClass": "The compute and memory capacity of the replication instance as defined for the specified replication instance class. It is a required parameter, although a default value is pre-selected in the DMS console.
For more information on the settings and capacities for the available replication instance classes, see Selecting the right DMS replication instance for your migration.
", "ReplicationInstance$ReplicationInstanceStatus": "The status of the replication instance. The possible return values include:
\"available\"
\"creating\"
\"deleted\"
\"deleting\"
\"failed\"
\"modifying\"
\"upgrading\"
\"rebooting\"
\"resetting-master-credentials\"
\"storage-full\"
\"incompatible-credentials\"
\"incompatible-network\"
\"maintenance\"
The Availability Zone for the instance.
", "ReplicationInstance$PreferredMaintenanceWindow": "The maintenance window times for the replication instance. Any pending upgrades to the replication instance are performed during this time.
", "ReplicationInstance$EngineVersion": "The engine version number of the replication instance.
If an engine version number is not specified when a replication instance is created, the default is the latest engine version available.
When modifying a major engine version of an instance, also set AllowMajorVersionUpgrade
to true
.
An AWS KMS key identifier that is used to encrypt the data on the replication instance.
If you don't specify a value for the KmsKeyId
parameter, then AWS DMS uses your default encryption key.
AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS Region.
", + "ReplicationInstance$KmsKeyId": "An KMS key identifier that is used to encrypt the data on the replication instance.
If you don't specify a value for the KmsKeyId
parameter, then DMS uses your default encryption key.
KMS creates the default encryption key for your account. Your account has a different default encryption key for each Region.
", "ReplicationInstance$ReplicationInstanceArn": "The Amazon Resource Name (ARN) of the replication instance.
", "ReplicationInstance$ReplicationInstancePublicIpAddress": "The public IP address of the replication instance.
", "ReplicationInstance$ReplicationInstancePrivateIpAddress": "The private IP address of the replication instance.
", @@ -1902,7 +1919,7 @@ "ReplicationInstancePublicIpAddressList$member": null, "ReplicationInstanceTaskLog$ReplicationTaskName": "The name of the replication task.
", "ReplicationInstanceTaskLog$ReplicationTaskArn": "The Amazon Resource Name (ARN) of the replication task.
", - "ReplicationPendingModifiedValues$ReplicationInstanceClass": "The compute and memory capacity of the replication instance as defined for the specified replication instance class.
For more information on the settings and capacities for the available replication instance classes, see Selecting the right AWS DMS replication instance for your migration.
", + "ReplicationPendingModifiedValues$ReplicationInstanceClass": "The compute and memory capacity of the replication instance as defined for the specified replication instance class.
For more information on the settings and capacities for the available replication instance classes, see Selecting the right DMS replication instance for your migration.
", "ReplicationPendingModifiedValues$EngineVersion": "The engine version number of the replication instance.
", "ReplicationSubnetGroup$ReplicationSubnetGroupIdentifier": "The identifier of the replication instance subnet group.
", "ReplicationSubnetGroup$ReplicationSubnetGroupDescription": "A description for the replication subnet group.
", @@ -1921,63 +1938,63 @@ "ReplicationTask$CdcStopPosition": "Indicates when you want a change data capture (CDC) operation to stop. The value can be either server time or commit time.
Server time example: --cdc-stop-position “server_time:2018-02-09T12:12:12”
Commit time example: --cdc-stop-position “commit_time: 2018-02-09T12:12:12 “
", "ReplicationTask$RecoveryCheckpoint": "Indicates the last checkpoint that occurred during a change data capture (CDC) operation. You can provide this value to the CdcStartPosition
parameter to start a CDC operation that begins at that checkpoint.
The Amazon Resource Name (ARN) of the replication task.
", - "ReplicationTask$TaskData": "Supplemental information that the task requires to migrate the data for certain source and target endpoints. For more information, see Specifying Supplemental Data for Task Settings in the AWS Database Migration Service User Guide.
", + "ReplicationTask$TaskData": "Supplemental information that the task requires to migrate the data for certain source and target endpoints. For more information, see Specifying Supplemental Data for Task Settings in the Database Migration Service User Guide.
", "ReplicationTask$TargetReplicationInstanceArn": "The ARN of the replication instance to which this task is moved in response to running the MoveReplicationTask
operation. Otherwise, this response parameter isn't a member of the ReplicationTask
object.
The replication task identifier of the task on which the task assessment was run.
", "ReplicationTaskAssessmentResult$ReplicationTaskArn": "The Amazon Resource Name (ARN) of the replication task.
", "ReplicationTaskAssessmentResult$AssessmentStatus": "The status of the task assessment.
", "ReplicationTaskAssessmentResult$AssessmentResultsFile": "The file containing the results of the task assessment.
", - "ReplicationTaskAssessmentResult$AssessmentResults": "The task assessment results in JSON format.
", - "ReplicationTaskAssessmentResult$S3ObjectUrl": "The URL of the S3 object containing the task assessment results.
", + "ReplicationTaskAssessmentResult$AssessmentResults": "The task assessment results in JSON format.
The response object only contains this field if you provide DescribeReplicationTaskAssessmentResultsMessage$ReplicationTaskArn in the request.
", + "ReplicationTaskAssessmentResult$S3ObjectUrl": "The URL of the S3 object containing the task assessment results.
The response object only contains this field if you provide DescribeReplicationTaskAssessmentResultsMessage$ReplicationTaskArn in the request.
", "ReplicationTaskAssessmentRun$ReplicationTaskAssessmentRunArn": "Amazon Resource Name (ARN) of this assessment run.
", "ReplicationTaskAssessmentRun$ReplicationTaskArn": "ARN of the migration task associated with this premigration assessment run.
", "ReplicationTaskAssessmentRun$Status": "Assessment run status.
This status can have one of the following values:
\"cancelling\"
– The assessment run was canceled by the CancelReplicationTaskAssessmentRun
operation.
\"deleting\"
– The assessment run was deleted by the DeleteReplicationTaskAssessmentRun
operation.
\"failed\"
– At least one individual assessment completed with a failed
status.
\"error-provisioning\"
– An internal error occurred while resources were provisioned (during provisioning
status).
\"error-executing\"
– An internal error occurred while individual assessments ran (during running
status).
\"invalid state\"
– The assessment run is in an unknown state.
\"passed\"
– All individual assessments have completed, and none has a failed
status.
\"provisioning\"
– Resources required to run individual assessments are being provisioned.
\"running\"
– Individual assessments are being run.
\"starting\"
– The assessment run is starting, but resources are not yet being provisioned for individual assessments.
Last message generated by an individual assessment failure.
", - "ReplicationTaskAssessmentRun$ServiceAccessRoleArn": "ARN of the service role used to start the assessment run using the StartReplicationTaskAssessmentRun
operation.
Amazon S3 bucket where AWS DMS stores the results of this assessment run.
", - "ReplicationTaskAssessmentRun$ResultLocationFolder": "Folder in an Amazon S3 bucket where AWS DMS stores the results of this assessment run.
", + "ReplicationTaskAssessmentRun$ServiceAccessRoleArn": "ARN of the service role used to start the assessment run using the StartReplicationTaskAssessmentRun
operation. The role must allow the iam:PassRole
action.
Amazon S3 bucket where DMS stores the results of this assessment run.
", + "ReplicationTaskAssessmentRun$ResultLocationFolder": "Folder in an Amazon S3 bucket where DMS stores the results of this assessment run.
", "ReplicationTaskAssessmentRun$ResultEncryptionMode": "Encryption mode used to encrypt the assessment run results.
", - "ReplicationTaskAssessmentRun$ResultKmsKeyArn": "ARN of the AWS KMS encryption key used to encrypt the assessment run results.
", + "ReplicationTaskAssessmentRun$ResultKmsKeyArn": "ARN of the KMS encryption key used to encrypt the assessment run results.
", "ReplicationTaskAssessmentRun$AssessmentRunName": "Unique name of the assessment run.
", "ReplicationTaskIndividualAssessment$ReplicationTaskIndividualAssessmentArn": "Amazon Resource Name (ARN) of this individual assessment.
", "ReplicationTaskIndividualAssessment$ReplicationTaskAssessmentRunArn": "ARN of the premigration assessment run that is created to run this individual assessment.
", "ReplicationTaskIndividualAssessment$IndividualAssessmentName": "Name of this individual assessment.
", "ReplicationTaskIndividualAssessment$Status": "Individual assessment status.
This status can have one of the following values:
\"cancelled\"
\"error\"
\"failed\"
\"passed\"
\"pending\"
\"running\"
The Amazon Resource Name (ARN) of the DMS resource that the pending maintenance action applies to. For information about creating an ARN, see Constructing an Amazon Resource Name (ARN) for AWS DMS in the DMS documentation.
", - "S3Settings$ServiceAccessRoleArn": "The Amazon Resource Name (ARN) used by the service access IAM role. It is a required parameter that enables DMS to write and read objects from an S3 bucket.
", + "ResourcePendingMaintenanceActions$ResourceIdentifier": "The Amazon Resource Name (ARN) of the DMS resource that the pending maintenance action applies to. For information about creating an ARN, see Constructing an Amazon Resource Name (ARN) for DMS in the DMS documentation.
", + "S3Settings$ServiceAccessRoleArn": " The Amazon Resource Name (ARN) used by the service to access the IAM role. The role must allow the iam:PassRole
action. It is a required parameter that enables DMS to write and read objects from an S3 bucket.
Specifies how tables are defined in the S3 source files only.
", "S3Settings$CsvRowDelimiter": " The delimiter used to separate rows in the .csv file for both source and target. The default is a carriage return (\\n
).
The delimiter used to separate columns in the .csv file for both source and target. The default is a comma.
", "S3Settings$BucketFolder": " An optional parameter to set a folder name in the S3 bucket. If provided, tables are created in the path bucketFolder/schema_name/table_name/
. If this parameter isn't specified, then the path used is schema_name/table_name/
.
The name of the S3 bucket.
", - "S3Settings$ServerSideEncryptionKmsKeyId": "If you are using SSE_KMS
for the EncryptionMode
, provide the AWS KMS key ID. The key that you use needs an attached policy that enables AWS Identity and Access Management (IAM) user permissions and allows use of the key.
Here is a CLI example: aws dms create-endpoint --endpoint-identifier value --endpoint-type target --engine-name s3 --s3-settings ServiceAccessRoleArn=value,BucketFolder=value,BucketName=value,EncryptionMode=SSE_KMS,ServerSideEncryptionKmsKeyId=value
A value that when nonblank causes AWS DMS to add a column with timestamp information to the endpoint data for an Amazon S3 target.
AWS DMS supports the TimestampColumnName
parameter in versions 3.1.4 and later.
DMS includes an additional STRING
column in the .csv or .parquet object files of your migrated data when you set TimestampColumnName
to a nonblank value.
For a full load, each row of this timestamp column contains a timestamp for when the data was transferred from the source to the target by DMS.
For a change data capture (CDC) load, each row of the timestamp column contains the timestamp for the commit of that row in the source database.
The string format for this timestamp column value is yyyy-MM-dd HH:mm:ss.SSSSSS
. By default, the precision of this value is in microseconds. For a CDC load, the rounding of the precision depends on the commit timestamp supported by DMS for the source database.
When the AddColumnName
parameter is set to true
, DMS also includes a name for the timestamp column that you set with TimestampColumnName
.
This setting only applies if your Amazon S3 output files during a change data capture (CDC) load are written in .csv format. If UseCsvNoSupValue
is set to true, specify a string value that you want AWS DMS to use for all columns not included in the supplemental log. If you do not specify a string value, AWS DMS uses the null value for these columns regardless of the UseCsvNoSupValue
setting.
This setting is supported in AWS DMS versions 3.4.1 and later.
Specifies the folder path of CDC files. For an S3 source, this setting is required if a task captures change data; otherwise, it's optional. If CdcPath
is set, AWS DMS reads CDC files from this path and replicates the data changes to the target endpoint. For an S3 target if you set PreserveTransactions
to true
, AWS DMS verifies that you have set this parameter to a folder path on your S3 target where AWS DMS can save the transaction order for the CDC load. AWS DMS creates this CDC folder path in either your S3 target working directory or the S3 target location specified by BucketFolder
and BucketName
.
For example, if you specify CdcPath
as MyChangedData
, and you specify BucketName
as MyTargetBucket
but do not specify BucketFolder
, AWS DMS creates the CDC folder path following: MyTargetBucket/MyChangedData
.
If you specify the same CdcPath
, and you specify BucketName
as MyTargetBucket
and BucketFolder
as MyTargetData
, AWS DMS creates the CDC folder path following: MyTargetBucket/MyTargetData/MyChangedData
.
For more information on CDC including transaction order on an S3 target, see Capturing data changes (CDC) including transaction order on the S3 target.
This setting is supported in AWS DMS versions 3.4.2 and later.
If you are using SSE_KMS
for the EncryptionMode
, provide the KMS key ID. The key that you use needs an attached policy that enables Identity and Access Management (IAM) user permissions and allows use of the key.
Here is a CLI example: aws dms create-endpoint --endpoint-identifier value --endpoint-type target --engine-name s3 --s3-settings ServiceAccessRoleArn=value,BucketFolder=value,BucketName=value,EncryptionMode=SSE_KMS,ServerSideEncryptionKmsKeyId=value
A value that when nonblank causes DMS to add a column with timestamp information to the endpoint data for an Amazon S3 target.
DMS supports the TimestampColumnName
parameter in versions 3.1.4 and later.
DMS includes an additional STRING
column in the .csv or .parquet object files of your migrated data when you set TimestampColumnName
to a nonblank value.
For a full load, each row of this timestamp column contains a timestamp for when the data was transferred from the source to the target by DMS.
For a change data capture (CDC) load, each row of the timestamp column contains the timestamp for the commit of that row in the source database.
The string format for this timestamp column value is yyyy-MM-dd HH:mm:ss.SSSSSS
. By default, the precision of this value is in microseconds. For a CDC load, the rounding of the precision depends on the commit timestamp supported by DMS for the source database.
When the AddColumnName
parameter is set to true
, DMS also includes a name for the timestamp column that you set with TimestampColumnName
.
This setting only applies if your Amazon S3 output files during a change data capture (CDC) load are written in .csv format. If UseCsvNoSupValue
is set to true, specify a string value that you want DMS to use for all columns not included in the supplemental log. If you do not specify a string value, DMS uses the null value for these columns regardless of the UseCsvNoSupValue
setting.
This setting is supported in DMS versions 3.4.1 and later.
Specifies the folder path of CDC files. For an S3 source, this setting is required if a task captures change data; otherwise, it's optional. If CdcPath
is set, DMS reads CDC files from this path and replicates the data changes to the target endpoint. For an S3 target if you set PreserveTransactions
to true
, DMS verifies that you have set this parameter to a folder path on your S3 target where DMS can save the transaction order for the CDC load. DMS creates this CDC folder path in either your S3 target working directory or the S3 target location specified by BucketFolder
and BucketName
.
For example, if you specify CdcPath
as MyChangedData
, and you specify BucketName
as MyTargetBucket
but do not specify BucketFolder
, DMS creates the CDC folder path following: MyTargetBucket/MyChangedData
.
If you specify the same CdcPath
, and you specify BucketName
as MyTargetBucket
and BucketFolder
as MyTargetData
, DMS creates the CDC folder path following: MyTargetBucket/MyTargetData/MyChangedData
.
For more information on CDC including transaction order on an S3 target, see Capturing data changes (CDC) including transaction order on the S3 target.
This setting is supported in DMS versions 3.4.2 and later.
The Amazon Resource Name (ARN) of the replication task.
", "StartReplicationTaskAssessmentRunMessage$ReplicationTaskArn": "Amazon Resource Name (ARN) of the migration task associated with the premigration assessment run that you want to start.
", - "StartReplicationTaskAssessmentRunMessage$ServiceAccessRoleArn": "ARN of a service role needed to start the assessment run.
", - "StartReplicationTaskAssessmentRunMessage$ResultLocationBucket": "Amazon S3 bucket where you want AWS DMS to store the results of this assessment run.
", - "StartReplicationTaskAssessmentRunMessage$ResultLocationFolder": "Folder within an Amazon S3 bucket where you want AWS DMS to store the results of this assessment run.
", - "StartReplicationTaskAssessmentRunMessage$ResultEncryptionMode": "Encryption mode that you can specify to encrypt the results of this assessment run. If you don't specify this request parameter, AWS DMS stores the assessment run results without encryption. You can specify one of the options following:
\"SSE_S3\"
– The server-side encryption provided as a default by Amazon S3.
\"SSE_KMS\"
– AWS Key Management Service (AWS KMS) encryption. This encryption can use either a custom KMS encryption key that you specify or the default KMS encryption key that DMS provides.
ARN of the service role needed to start the assessment run. The role must allow the iam:PassRole
action.
Amazon S3 bucket where you want DMS to store the results of this assessment run.
", + "StartReplicationTaskAssessmentRunMessage$ResultLocationFolder": "Folder within an Amazon S3 bucket where you want DMS to store the results of this assessment run.
", + "StartReplicationTaskAssessmentRunMessage$ResultEncryptionMode": "Encryption mode that you can specify to encrypt the results of this assessment run. If you don't specify this request parameter, DMS stores the assessment run results without encryption. You can specify one of the options following:
\"SSE_S3\"
– The server-side encryption provided as a default by Amazon S3.
\"SSE_KMS\"
– Key Management Service (KMS) encryption. This encryption can use either a custom KMS encryption key that you specify or the default KMS encryption key that DMS provides.
ARN of a custom KMS encryption key that you specify when you set ResultEncryptionMode
to \"SSE_KMS
\".
Unique name to identify the assessment run.
", "StartReplicationTaskMessage$ReplicationTaskArn": "The Amazon Resource Name (ARN) of the replication task to be started.
", - "StartReplicationTaskMessage$CdcStartPosition": "Indicates when you want a change data capture (CDC) operation to start. Use either CdcStartPosition or CdcStartTime to specify when you want a CDC operation to start. Specifying both values results in an error.
The value can be in date, checkpoint, or LSN/SCN format.
Date Example: --cdc-start-position “2018-03-08T12:12:12”
Checkpoint Example: --cdc-start-position \"checkpoint:V1#27#mysql-bin-changelog.157832:1975:-1:2002:677883278264080:mysql-bin-changelog.157832:1876#0#0#*#0#93\"
LSN Example: --cdc-start-position “mysql-bin-changelog.000024:373”
When you use this task setting with a source PostgreSQL database, a logical replication slot should already be created and associated with the source endpoint. You can verify this by setting the slotName
extra connection attribute to the name of this logical replication slot. For more information, see Extra Connection Attributes When Using PostgreSQL as a Source for AWS DMS.
Indicates when you want a change data capture (CDC) operation to start. Use either CdcStartPosition or CdcStartTime to specify when you want a CDC operation to start. Specifying both values results in an error.
The value can be in date, checkpoint, or LSN/SCN format.
Date Example: --cdc-start-position “2018-03-08T12:12:12”
Checkpoint Example: --cdc-start-position \"checkpoint:V1#27#mysql-bin-changelog.157832:1975:-1:2002:677883278264080:mysql-bin-changelog.157832:1876#0#0#*#0#93\"
LSN Example: --cdc-start-position “mysql-bin-changelog.000024:373”
When you use this task setting with a source PostgreSQL database, a logical replication slot should already be created and associated with the source endpoint. You can verify this by setting the slotName
extra connection attribute to the name of this logical replication slot. For more information, see Extra Connection Attributes When Using PostgreSQL as a Source for DMS.
Indicates when you want a change data capture (CDC) operation to stop. The value can be either server time or commit time.
Server time example: --cdc-stop-position “server_time:2018-02-09T12:12:12”
Commit time example: --cdc-stop-position “commit_time: 2018-02-09T12:12:12 “
", "StopReplicationTaskMessage$ReplicationTaskArn": "The Amazon Resource Name(ARN) of the replication task to be stopped.
", "Subnet$SubnetIdentifier": "The subnet identifier.
", "Subnet$SubnetStatus": "The status of the subnet.
", "SubnetIdentifierList$member": null, "SupportedEndpointType$EngineName": "The database engine name. Valid values, depending on the EndpointType, include \"mysql\"
, \"oracle\"
, \"postgres\"
, \"mariadb\"
, \"aurora\"
, \"aurora-postgresql\"
, \"redshift\"
, \"s3\"
, \"db2\"
, \"azuredb\"
, \"sybase\"
, \"dynamodb\"
, \"mongodb\"
, \"kinesis\"
, \"kafka\"
, \"elasticsearch\"
, \"documentdb\"
, \"sqlserver\"
, and \"neptune\"
.
The earliest AWS DMS engine version that supports this endpoint engine. Note that endpoint engines released with AWS DMS versions earlier than 3.1.1 do not return a value for this parameter.
", + "SupportedEndpointType$ReplicationInstanceEngineMinimumVersion": "The earliest DMS engine version that supports this endpoint engine. Note that endpoint engines released with DMS versions earlier than 3.1.1 do not return a value for this parameter.
", "SupportedEndpointType$EngineDisplayName": "The expanded name for the engine name. For example, if the EngineName
parameter is \"aurora,\" this value would be \"Amazon Aurora MySQL.\"
Database name for the endpoint.
", "SybaseSettings$ServerName": "Fully qualified domain name of the endpoint.
", "SybaseSettings$Username": "Endpoint connection user name.
", - "SybaseSettings$SecretsManagerAccessRoleArn": "The full Amazon Resource Name (ARN) of the IAM role that specifies AWS DMS as the trusted entity and grants the required permissions to access the value in SecretsManagerSecret
. SecretsManagerSecret
has the value of the AWS Secrets Manager secret that allows access to the SAP ASE endpoint.
You can specify one of two sets of values for these permissions. You can specify the values for this setting and SecretsManagerSecretId
. Or you can specify clear-text values for UserName
, Password
, ServerName
, and Port
. You can't specify both. For more information on creating this SecretsManagerSecret
and the SecretsManagerAccessRoleArn
and SecretsManagerSecretId
required to access it, see Using secrets to access AWS Database Migration Service resources in the AWS Database Migration Service User Guide.
The full Amazon Resource Name (ARN) of the IAM role that specifies DMS as the trusted entity and grants the required permissions to access the value in SecretsManagerSecret
. The role must allow the iam:PassRole
action. SecretsManagerSecret
has the value of the Amazon Web Services Secrets Manager secret that allows access to the SAP ASE endpoint.
You can specify one of two sets of values for these permissions. You can specify the values for this setting and SecretsManagerSecretId
. Or you can specify clear-text values for UserName
, Password
, ServerName
, and Port
. You can't specify both. For more information on creating this SecretsManagerSecret
and the SecretsManagerAccessRoleArn
and SecretsManagerSecretId
required to access it, see Using secrets to access Database Migration Service resources in the Database Migration Service User Guide.
The full ARN, partial ARN, or friendly name of the SecretsManagerSecret
that contains the SAP SAE endpoint connection details.
The schema name.
", "TableStatistics$TableName": "The name of the table.
", @@ -2034,9 +2051,9 @@ "SybaseSettings": { "base": "Provides information that defines a SAP ASE endpoint.
", "refs": { - "CreateEndpointMessage$SybaseSettings": "Settings in JSON format for the source and target SAP ASE endpoint. For information about other available settings, see Extra connection attributes when using SAP ASE as a source for AWS DMS and Extra connection attributes when using SAP ASE as a target for AWS DMS in the AWS Database Migration Service User Guide.
", + "CreateEndpointMessage$SybaseSettings": "Settings in JSON format for the source and target SAP ASE endpoint. For information about other available settings, see Extra connection attributes when using SAP ASE as a source for DMS and Extra connection attributes when using SAP ASE as a target for DMS in the Database Migration Service User Guide.
", "Endpoint$SybaseSettings": "The settings for the SAP ASE source and target endpoint. For more information, see the SybaseSettings
structure.
Settings in JSON format for the source and target SAP ASE endpoint. For information about other available settings, see Extra connection attributes when using SAP ASE as a source for AWS DMS and Extra connection attributes when using SAP ASE as a target for AWS DMS in the AWS Database Migration Service User Guide.
" + "ModifyEndpointMessage$SybaseSettings": "Settings in JSON format for the source and target SAP ASE endpoint. For information about other available settings, see Extra connection attributes when using SAP ASE as a source for DMS and Extra connection attributes when using SAP ASE as a target for DMS in the Database Migration Service User Guide.
" } }, "TStamp": { @@ -2097,7 +2114,7 @@ } }, "Tag": { - "base": "A user-defined key-value pair that describes metadata added to an AWS DMS resource and that is used by operations such as the following:
AddTagsToResource
ListTagsForResource
RemoveTagsFromResource
A user-defined key-value pair that describes metadata added to a DMS resource and that is used by operations such as the following:
AddTagsToResource
ListTagsForResource
RemoveTagsFromResource
Number of events that must be received from Amazon EventBridge before EventBridge event trigger fires.
" + } + }, "BatchStopJobRunError": { "base": "Records an error that occurred when attempting to stop a specified job run.
", "refs": { @@ -430,6 +436,12 @@ "refs": { } }, + "BatchWindow": { + "base": null, + "refs": { + "EventBatchingCondition$BatchWindow": "Window of time in seconds after which EventBridge event trigger fires. Window starts when first event is received.
" + } + }, "BinaryColumnStatisticsData": { "base": "Defines column statistics supported for bit sequence data values.
", "refs": { @@ -1742,7 +1754,7 @@ } }, "Edge": { - "base": "An edge represents a directed connection between two Glue components that are part of the workflow the edge belongs to.
", + "base": "An edge represents a directed connection between two components on a workflow graph.
", "refs": { "EdgeList$member": null } @@ -1828,6 +1840,14 @@ "MLTransform$EvaluationMetrics": "An EvaluationMetrics
object. Evaluation metrics provide an estimate of the quality of your machine learning transform.
Batch condition that must be met (specified number of events received or batch time window expired) before EventBridge event trigger fires.
", + "refs": { + "CreateTriggerRequest$EventBatchingCondition": null, + "Trigger$EventBatchingCondition": "Batch condition that must be met (specified number of events received or batch time window expired) before EventBridge event trigger fires.
", + "TriggerUpdate$EventBatchingCondition": "Batch condition that must be met (specified number of events received or batch time window expired) before EventBridge event trigger fires.
" + } + }, "ExecutionProperty": { "base": "An execution property of a job.
", "refs": { @@ -3538,7 +3558,7 @@ "UserDefinedFunctionInput$FunctionName": "The name of the function.
", "UserDefinedFunctionInput$ClassName": "The Java class that contains the function code.
", "UserDefinedFunctionInput$OwnerName": "The owner of the function.
", - "Workflow$Name": "The name of the workflow representing the flow.
", + "Workflow$Name": "The name of the workflow.
", "WorkflowNames$member": null, "WorkflowRun$Name": "Name of the workflow that was run.
", "XMLClassifier$Name": "The name of the classifier.
" @@ -3558,7 +3578,7 @@ } }, "Node": { - "base": "A node represents an Glue component such as a trigger, or job, etc., that is part of a workflow.
", + "base": "A node represents an Glue component (trigger, crawler, or job) on a workflow graph.
", "refs": { "NodeList$member": null } @@ -3693,6 +3713,8 @@ "MLTransform$MaxRetries": "The maximum number of times to retry after an MLTaskRun
of the machine learning transform fails.
Sets the number of files in each leaf folder to be crawled when crawling sample files in a dataset. If not set, all the files are crawled. A valid value is an integer between 1 and 249.
", "StartJobRunRequest$NumberOfWorkers": "The number of workers of a defined workerType
that are allocated when a job runs.
The maximum number of workers you can define are 299 for G.1X
, and 149 for G.2X
.
Number of events in the batch.
", + "StartingEventBatchCondition$BatchWindow": "Duration of the batch window in seconds.
", "UpdateMLTransformRequest$NumberOfWorkers": "The number of workers of a defined workerType
that are allocated when this task runs.
The maximum number of times to retry a task for this transform after a task run fails.
", "UpdateWorkflowRequest$MaxConcurrentRuns": "You can use this parameter to prevent unwanted multiple updates to data, to control costs, or in some cases, to prevent exceeding the maximum number of concurrent runs of any of the component jobs. If you leave this parameter blank, there is no limit to the number of concurrent workflow runs.
", @@ -4667,6 +4689,12 @@ "refs": { } }, + "StartingEventBatchCondition": { + "base": "The batch condition that started the workflow run. Either the number of events in the batch size arrived, in which case the BatchSize member is non-zero, or the batch window expired, in which case the BatchWindow member is non-zero.
", + "refs": { + "WorkflowRun$StartingEventBatchCondition": "The batch condition that started the workflow run.
" + } + }, "StopCrawlerRequest": { "base": null, "refs": { @@ -5547,7 +5575,7 @@ } }, "Workflow": { - "base": "A workflow represents a flow in which Glue components should be run to complete a logical task.
", + "base": "A workflow is a collection of multiple dependent Glue jobs and crawlers that are run to complete a complex ETL task. A workflow manages the execution and monitoring of all its jobs and crawlers.
", "refs": { "GetWorkflowResponse$Workflow": "The resource metadata for the workflow.
", "Workflows$member": null @@ -5583,7 +5611,7 @@ "GetWorkflowRunPropertiesResponse$RunProperties": "The workflow run properties which were set during the specified run.
", "PutWorkflowRunPropertiesRequest$RunProperties": "The properties to put for the specified run.
", "UpdateWorkflowRequest$DefaultRunProperties": "A collection of properties to be used as part of each execution of the workflow.
", - "Workflow$DefaultRunProperties": "A collection of properties to be used as part of each execution of the workflow.
", + "Workflow$DefaultRunProperties": "A collection of properties to be used as part of each execution of the workflow. The run properties are made available to each job in the workflow. A job can modify the properties for the next jobs in the flow.
", "WorkflowRun$WorkflowRunProperties": "The workflow run properties which were set during the run.
" } }, diff --git a/models/apis/healthlake/2017-07-01/api-2.json b/models/apis/healthlake/2017-07-01/api-2.json index b7856d7575d..31d96ef19c6 100644 --- a/models/apis/healthlake/2017-07-01/api-2.json +++ b/models/apis/healthlake/2017-07-01/api-2.json @@ -25,6 +25,7 @@ "errors":[ {"shape":"ValidationException"}, {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ] }, @@ -104,6 +105,51 @@ {"shape":"InternalServerException"} ] }, + "ListFHIRExportJobs":{ + "name":"ListFHIRExportJobs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListFHIRExportJobsRequest"}, + "output":{"shape":"ListFHIRExportJobsResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ] + }, + "ListFHIRImportJobs":{ + "name":"ListFHIRImportJobs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListFHIRImportJobsRequest"}, + "output":{"shape":"ListFHIRImportJobsResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ] + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, "StartFHIRExportJob":{ "name":"StartFHIRExportJob", "http":{ @@ -135,6 +181,32 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"} ] + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + 
{"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ] } }, "shapes":{ @@ -145,6 +217,12 @@ }, "exception":true }, + "AmazonResourceName":{ + "type":"string", + "max":1011, + "min":1, + "pattern":"^arn:aws((-us-gov)|(-iso)|(-iso-b)|(-cn))?:healthlake:[a-z0-9-]+:\\d{12}:datastore\\/fhir\\/.{32}" + }, "BoundedLengthString":{ "type":"string", "max":5000, @@ -157,6 +235,13 @@ "min":1, "pattern":"^[a-zA-Z0-9-]+$" }, + "CmkType":{ + "type":"string", + "enum":[ + "CUSTOMER_MANAGED_KMS_KEY", + "AWS_OWNED_KMS_KEY" + ] + }, "ConflictException":{ "type":"structure", "members":{ @@ -170,11 +255,13 @@ "members":{ "DatastoreName":{"shape":"DatastoreName"}, "DatastoreTypeVersion":{"shape":"FHIRVersion"}, + "SseConfiguration":{"shape":"SseConfiguration"}, "PreloadDataConfig":{"shape":"PreloadDataConfig"}, "ClientToken":{ "shape":"ClientTokenString", "idempotencyToken":true - } + }, + "Tags":{"shape":"TagList"} } }, "CreateFHIRDatastoreResponse":{ @@ -234,6 +321,7 @@ "CreatedAt":{"shape":"Timestamp"}, "DatastoreTypeVersion":{"shape":"FHIRVersion"}, "DatastoreEndpoint":{"shape":"String"}, + "SseConfiguration":{"shape":"SseConfiguration"}, "PreloadDataConfig":{"shape":"PreloadDataConfig"} } }, @@ -320,6 +408,12 @@ "ImportJobProperties":{"shape":"ImportJobProperties"} } }, + "EncryptionKeyID":{ + "type":"string", + "max":400, + "min":1, + "pattern":"(arn:aws((-us-gov)|(-iso)|(-iso-b)|(-cn))?:kms:)?([a-z]{2}-[a-z]+(-[a-z]+)?-\\d:)?(\\d{12}:)?(((key/)?[a-zA-Z0-9-_]+)|(alias/[a-zA-Z0-9:/_-]+))" + }, "ExportJobProperties":{ "type":"structure", "required":[ @@ -341,6 +435,10 @@ "Message":{"shape":"Message"} } }, + "ExportJobPropertiesList":{ + "type":"list", + 
"member":{"shape":"ExportJobProperties"} + }, "FHIRVersion":{ "type":"string", "enum":["R4"] @@ -368,10 +466,15 @@ "EndTime":{"shape":"Timestamp"}, "DatastoreId":{"shape":"DatastoreId"}, "InputDataConfig":{"shape":"InputDataConfig"}, + "JobOutputDataConfig":{"shape":"OutputDataConfig"}, "DataAccessRoleArn":{"shape":"IamRoleArn"}, "Message":{"shape":"Message"} } }, + "ImportJobPropertiesList":{ + "type":"list", + "member":{"shape":"ImportJobProperties"} + }, "InputDataConfig":{ "type":"structure", "members":{ @@ -404,10 +507,19 @@ "enum":[ "SUBMITTED", "IN_PROGRESS", + "COMPLETED_WITH_ERRORS", "COMPLETED", "FAILED" ] }, + "KmsEncryptionConfig":{ + "type":"structure", + "required":["CmkType"], + "members":{ + "CmkType":{"shape":"CmkType"}, + "KmsKeyId":{"shape":"EncryptionKeyID"} + } + }, "ListFHIRDatastoresRequest":{ "type":"structure", "members":{ @@ -424,6 +536,61 @@ "NextToken":{"shape":"NextToken"} } }, + "ListFHIRExportJobsRequest":{ + "type":"structure", + "required":["DatastoreId"], + "members":{ + "DatastoreId":{"shape":"DatastoreId"}, + "NextToken":{"shape":"NextToken"}, + "MaxResults":{"shape":"MaxResultsInteger"}, + "JobName":{"shape":"JobName"}, + "JobStatus":{"shape":"JobStatus"}, + "SubmittedBefore":{"shape":"Timestamp"}, + "SubmittedAfter":{"shape":"Timestamp"} + } + }, + "ListFHIRExportJobsResponse":{ + "type":"structure", + "required":["ExportJobPropertiesList"], + "members":{ + "ExportJobPropertiesList":{"shape":"ExportJobPropertiesList"}, + "NextToken":{"shape":"NextToken"} + } + }, + "ListFHIRImportJobsRequest":{ + "type":"structure", + "required":["DatastoreId"], + "members":{ + "DatastoreId":{"shape":"DatastoreId"}, + "NextToken":{"shape":"NextToken"}, + "MaxResults":{"shape":"MaxResultsInteger"}, + "JobName":{"shape":"JobName"}, + "JobStatus":{"shape":"JobStatus"}, + "SubmittedBefore":{"shape":"Timestamp"}, + "SubmittedAfter":{"shape":"Timestamp"} + } + }, + "ListFHIRImportJobsResponse":{ + "type":"structure", + 
"required":["ImportJobPropertiesList"], + "members":{ + "ImportJobPropertiesList":{"shape":"ImportJobPropertiesList"}, + "NextToken":{"shape":"NextToken"} + } + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["ResourceARN"], + "members":{ + "ResourceARN":{"shape":"AmazonResourceName"} + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "Tags":{"shape":"TagList"} + } + }, "MaxResultsInteger":{ "type":"integer", "max":500, @@ -443,7 +610,7 @@ "OutputDataConfig":{ "type":"structure", "members":{ - "S3Uri":{"shape":"S3Uri"} + "S3Configuration":{"shape":"S3Configuration"} }, "union":true }, @@ -465,11 +632,29 @@ }, "exception":true }, + "S3Configuration":{ + "type":"structure", + "required":[ + "S3Uri", + "KmsKeyId" + ], + "members":{ + "S3Uri":{"shape":"S3Uri"}, + "KmsKeyId":{"shape":"EncryptionKeyID"} + } + }, "S3Uri":{ "type":"string", "max":1024, "pattern":"s3://[a-z0-9][\\.\\-a-z0-9]{1,61}[a-z0-9](/.*)?" }, + "SseConfiguration":{ + "type":"structure", + "required":["KmsEncryptionConfig"], + "members":{ + "KmsEncryptionConfig":{"shape":"KmsEncryptionConfig"} + } + }, "StartFHIRExportJobRequest":{ "type":"structure", "required":[ @@ -505,6 +690,7 @@ "type":"structure", "required":[ "InputDataConfig", + "JobOutputDataConfig", "DatastoreId", "DataAccessRoleArn", "ClientToken" @@ -512,6 +698,7 @@ "members":{ "JobName":{"shape":"JobName"}, "InputDataConfig":{"shape":"InputDataConfig"}, + "JobOutputDataConfig":{"shape":"OutputDataConfig"}, "DatastoreId":{"shape":"DatastoreId"}, "DataAccessRoleArn":{"shape":"IamRoleArn"}, "ClientToken":{ @@ -537,6 +724,57 @@ "max":10000, "pattern":"[\\P{M}\\p{M}]{0,10000}" }, + "Tag":{ + "type":"structure", + "required":[ + "Key", + "Value" + ], + "members":{ + "Key":{"shape":"TagKey"}, + "Value":{"shape":"TagValue"} + } + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" + }, + "TagKeyList":{ + "type":"list", + 
"member":{"shape":"TagKey"}, + "max":200, + "min":0 + }, + "TagList":{ + "type":"list", + "member":{"shape":"Tag"}, + "max":200, + "min":0 + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceARN", + "Tags" + ], + "members":{ + "ResourceARN":{"shape":"AmazonResourceName"}, + "Tags":{"shape":"TagList"} + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256, + "min":0, + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" + }, "ThrottlingException":{ "type":"structure", "members":{ @@ -545,6 +783,22 @@ "exception":true }, "Timestamp":{"type":"timestamp"}, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceARN", + "TagKeys" + ], + "members":{ + "ResourceARN":{"shape":"AmazonResourceName"}, + "TagKeys":{"shape":"TagKeyList"} + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, "ValidationException":{ "type":"structure", "members":{ diff --git a/models/apis/healthlake/2017-07-01/docs-2.json b/models/apis/healthlake/2017-07-01/docs-2.json index 63aba2dd25e..3f645c7ecde 100644 --- a/models/apis/healthlake/2017-07-01/docs-2.json +++ b/models/apis/healthlake/2017-07-01/docs-2.json @@ -6,10 +6,15 @@ "DeleteFHIRDatastore": "Deletes a Data Store.
", "DescribeFHIRDatastore": "Gets the properties associated with the FHIR Data Store, including the Data Store ID, Data Store ARN, Data Store name, Data Store status, created at, Data Store type version, and Data Store endpoint.
", "DescribeFHIRExportJob": "Displays the properties of a FHIR export job, including the ID, ARN, name, and the status of the job.
", - "DescribeFHIRImportJob": "Displays the properties of a FHIR import job, including the ID, ARN, name, and the status of the job.
", + "DescribeFHIRImportJob": "Displays the properties of a FHIR import job, including the ID, ARN, name, and the status of the job.
", "ListFHIRDatastores": "Lists all FHIR Data Stores that are in the user’s account, regardless of Data Store status.
", + "ListFHIRExportJobs": "Lists all FHIR export jobs associated with an account and their statuses.
", + "ListFHIRImportJobs": "Lists all FHIR import jobs associated with an account and their statuses.
", + "ListTagsForResource": "Returns a list of all existing tags associated with a Data Store.
", "StartFHIRExportJob": "Begins a FHIR export job.
", - "StartFHIRImportJob": "Begins a FHIR Import job.
" + "StartFHIRImportJob": "Begins a FHIR Import job.
", + "TagResource": "Adds a user specifed key and value tag to a Data Store.
", + "UntagResource": "Removes tags from a Data Store.
" }, "shapes": { "AccessDeniedException": { @@ -17,6 +22,14 @@ "refs": { } }, + "AmazonResourceName": { + "base": null, + "refs": { + "ListTagsForResourceRequest$ResourceARN": "The Amazon Resource Name(ARN) of the Data Store for which tags are being added.
", + "TagResourceRequest$ResourceARN": "The Amazon Resource Name(ARN)that gives Amazon HealthLake access to the Data Store which tags are being added to.
", + "UntagResourceRequest$ResourceARN": "\"The Amazon Resource Name(ARN) of the Data Store for which tags are being removed
" + } + }, "BoundedLengthString": { "base": null, "refs": { @@ -32,6 +45,12 @@ "StartFHIRImportJobRequest$ClientToken": "Optional user provided token used for ensuring idempotency.
" } }, + "CmkType": { + "base": null, + "refs": { + "KmsEncryptionConfig$CmkType": "The type of customer-managed-key(CMK) used for encyrption. The two types of supported CMKs are customer owned CMKs and AWS owned CMKs.
" + } + }, "ConflictException": { "base": "The Data Store is in a transition state and the user requested action can not be performed.
", "refs": { @@ -73,6 +92,8 @@ "DescribeFHIRImportJobRequest$DatastoreId": "The AWS-generated ID of the Data Store.
", "ExportJobProperties$DatastoreId": "The AWS generated ID for the Data Store from which files are being exported for an export job.
", "ImportJobProperties$DatastoreId": "The datastore id used when the Import job was created.
", + "ListFHIRExportJobsRequest$DatastoreId": "This parameter limits the response to the export job with the specified Data Store ID.
", + "ListFHIRImportJobsRequest$DatastoreId": "This parameter limits the response to the import job with the specified Data Store ID.
", "StartFHIRExportJobRequest$DatastoreId": "The AWS generated ID for the Data Store from which files are being exported for an export job.
", "StartFHIRExportJobResponse$DatastoreId": "The AWS generated ID for the Data Store from which files are being exported for an export job.
", "StartFHIRImportJobRequest$DatastoreId": "The AWS-generated Data Store ID.
", @@ -149,10 +170,24 @@ "refs": { } }, + "EncryptionKeyID": { + "base": null, + "refs": { + "KmsEncryptionConfig$KmsKeyId": "The KMS encryption key id/alias used to encrypt the Data Store contents at rest.
", + "S3Configuration$KmsKeyId": "The KMS key ID used to access the S3 bucket.
" + } + }, "ExportJobProperties": { "base": "The properties of a FHIR export job, including the ID, ARN, name, and the status of the job.
", "refs": { - "DescribeFHIRExportJobResponse$ExportJobProperties": "Displays the properties of the export job, including the ID, Arn, Name, and the status of the job.
" + "DescribeFHIRExportJobResponse$ExportJobProperties": "Displays the properties of the export job, including the ID, Arn, Name, and the status of the job.
", + "ExportJobPropertiesList$member": null + } + }, + "ExportJobPropertiesList": { + "base": null, + "refs": { + "ListFHIRExportJobsResponse$ExportJobPropertiesList": "The properties of listed FHIR export jobs, including the ID, ARN, name, and the status of the job.
" } }, "FHIRVersion": { @@ -174,7 +209,14 @@ "ImportJobProperties": { "base": "Displays the properties of the import job, including the ID, Arn, Name, and the status of the Data Store.
", "refs": { - "DescribeFHIRImportJobResponse$ImportJobProperties": "The properties of the Import job request, including the ID, ARN, name, and the status of the job.
" + "DescribeFHIRImportJobResponse$ImportJobProperties": "The properties of the Import job request, including the ID, ARN, name, and the status of the job.
", + "ImportJobPropertiesList$member": null + } + }, + "ImportJobPropertiesList": { + "base": null, + "refs": { + "ListFHIRImportJobsResponse$ImportJobPropertiesList": "The properties of a listed FHIR import jobs, including the ID, ARN, name, and the status of the job.
" } }, "InputDataConfig": { @@ -205,6 +247,8 @@ "refs": { "ExportJobProperties$JobName": "The user generated name for an export job.
", "ImportJobProperties$JobName": "The user-generated name for an Import job.
", + "ListFHIRExportJobsRequest$JobName": "This parameter limits the response to the export job with the specified job name.
", + "ListFHIRImportJobsRequest$JobName": "This parameter limits the response to the import job with the specified job name.
", "StartFHIRExportJobRequest$JobName": "The user generated name for an export job.
", "StartFHIRImportJobRequest$JobName": "The name of the FHIR Import job in the StartFHIRImport job request.
" } @@ -214,10 +258,18 @@ "refs": { "ExportJobProperties$JobStatus": "The status of a FHIR export job. Possible statuses are SUBMITTED, IN_PROGRESS, COMPLETED, or FAILED.
", "ImportJobProperties$JobStatus": "The job status for an Import job. Possible statuses are SUBMITTED, IN_PROGRESS, COMPLETED, FAILED.
", + "ListFHIRExportJobsRequest$JobStatus": "This parameter limits the response to the export jobs with the specified job status.
", + "ListFHIRImportJobsRequest$JobStatus": "This parameter limits the response to the import job with the specified job status.
", "StartFHIRExportJobResponse$JobStatus": "The status of a FHIR export job. Possible statuses are SUBMITTED, IN_PROGRESS, COMPLETED, or FAILED.
", "StartFHIRImportJobResponse$JobStatus": "The status of an import job.
" } }, + "KmsEncryptionConfig": { + "base": "The customer-managed-key(CMK) used when creating a Data Store. If a customer owned key is not specified, an AWS owned key will be used for encryption.
", + "refs": { + "SseConfiguration$KmsEncryptionConfig": "The KMS encryption configuration used to provide details for data encryption.
" + } + }, "ListFHIRDatastoresRequest": { "base": null, "refs": { @@ -228,10 +280,42 @@ "refs": { } }, + "ListFHIRExportJobsRequest": { + "base": null, + "refs": { + } + }, + "ListFHIRExportJobsResponse": { + "base": null, + "refs": { + } + }, + "ListFHIRImportJobsRequest": { + "base": null, + "refs": { + } + }, + "ListFHIRImportJobsResponse": { + "base": null, + "refs": { + } + }, + "ListTagsForResourceRequest": { + "base": null, + "refs": { + } + }, + "ListTagsForResourceResponse": { + "base": null, + "refs": { + } + }, "MaxResultsInteger": { "base": null, "refs": { - "ListFHIRDatastoresRequest$MaxResults": "The maximum number of Data Stores returned in a single page of a ListFHIRDatastoresRequest call.
" + "ListFHIRDatastoresRequest$MaxResults": "The maximum number of Data Stores returned in a single page of a ListFHIRDatastoresRequest call.
", + "ListFHIRExportJobsRequest$MaxResults": "This parameter limits the number of results returned for a ListFHIRExportJobs to a maximum quantity specified by the user.
", + "ListFHIRImportJobsRequest$MaxResults": "This parameter limits the number of results returned for a ListFHIRImportJobs to a maximum quantity specified by the user.
" } }, "Message": { @@ -245,14 +329,20 @@ "base": null, "refs": { "ListFHIRDatastoresRequest$NextToken": "Fetches the next page of Data Stores when results are paginated.
", - "ListFHIRDatastoresResponse$NextToken": "Pagination token that can be used to retrieve the next page of results.
" + "ListFHIRDatastoresResponse$NextToken": "Pagination token that can be used to retrieve the next page of results.
", + "ListFHIRExportJobsRequest$NextToken": "A pagination token used to identify the next page of results to return for a ListFHIRExportJobs query.
", + "ListFHIRExportJobsResponse$NextToken": "A pagination token used to identify the next page of results to return for a ListFHIRExportJobs query.
", + "ListFHIRImportJobsRequest$NextToken": "A pagination token used to identify the next page of results to return for a ListFHIRImportJobs query.
", + "ListFHIRImportJobsResponse$NextToken": "A pagination token used to identify the next page of results to return for a ListFHIRImportJobs query.
" } }, "OutputDataConfig": { "base": "The output data configuration that was supplied when the export job was created.
", "refs": { "ExportJobProperties$OutputDataConfig": "The output data configuration that was supplied when the export job was created.
", - "StartFHIRExportJobRequest$OutputDataConfig": "The output data configuration that was supplied when the export job was created.
" + "ImportJobProperties$JobOutputDataConfig": null, + "StartFHIRExportJobRequest$OutputDataConfig": "The output data configuration that was supplied when the export job was created.
", + "StartFHIRImportJobRequest$JobOutputDataConfig": null } }, "PreloadDataConfig": { @@ -273,11 +363,24 @@ "refs": { } }, + "S3Configuration": { + "base": "The configuration of the S3 bucket for either an import or export job. This includes assigning permissions for access.
", + "refs": { + "OutputDataConfig$S3Configuration": "The output data configuration that was supplied when the export job was created.
" + } + }, "S3Uri": { "base": null, "refs": { "InputDataConfig$S3Uri": "The S3Uri is the user specified S3 location of the FHIR data to be imported into Amazon HealthLake.
", - "OutputDataConfig$S3Uri": "The S3Uri is the user specified S3 location to which data will be exported from a FHIR Data Store.
" + "S3Configuration$S3Uri": "The S3Uri is the user specified S3 location of the FHIR data to be imported into Amazon HealthLake.
" + } + }, + "SseConfiguration": { + "base": "The server-side encryption key configuration for a customer provided encryption key.
", + "refs": { + "CreateFHIRDatastoreRequest$SseConfiguration": "The server-side encryption key configuration for a customer provided encryption key specified for creating a Data Store.
", + "DatastoreProperties$SseConfiguration": "The server-side encryption key configuration for a customer provided encryption key (CMK).
" } }, "StartFHIRExportJobRequest": { @@ -312,6 +415,49 @@ "ValidationException$Message": null } }, + "Tag": { + "base": "A tag is a label consisting of a user-defined key and value. The form for tags is {\"Key\", \"Value\"}
", + "refs": { + "TagList$member": null + } + }, + "TagKey": { + "base": null, + "refs": { + "Tag$Key": "The key portion of a tag. Tag keys are case sensitive.
", + "TagKeyList$member": null + } + }, + "TagKeyList": { + "base": null, + "refs": { + "UntagResourceRequest$TagKeys": "The keys for the tags to be removed from the Healthlake Data Store.
" + } + }, + "TagList": { + "base": null, + "refs": { + "CreateFHIRDatastoreRequest$Tags": "Resource tags that are applied to a Data Store when it is created.
", + "ListTagsForResourceResponse$Tags": "Returns a list of tags associated with a Data Store.
", + "TagResourceRequest$Tags": "The user specified key and value pair tags being added to a Data Store.
" + } + }, + "TagResourceRequest": { + "base": null, + "refs": { + } + }, + "TagResourceResponse": { + "base": null, + "refs": { + } + }, + "TagValue": { + "base": null, + "refs": { + "Tag$Value": "The value portion of tag. Tag values are case sensitive.
" + } + }, "ThrottlingException": { "base": "The user has exceeded their maximum number of allowed calls to the given API.
", "refs": { @@ -326,7 +472,21 @@ "ExportJobProperties$SubmitTime": "The time an export job was initiated.
", "ExportJobProperties$EndTime": "The time an export job completed.
", "ImportJobProperties$SubmitTime": "The time that the Import job was submitted for processing.
", - "ImportJobProperties$EndTime": "The time that the Import job was completed.
" + "ImportJobProperties$EndTime": "The time that the Import job was completed.
", + "ListFHIRExportJobsRequest$SubmittedBefore": "This parameter limits the response to FHIR export jobs submitted before a user specified date.
", + "ListFHIRExportJobsRequest$SubmittedAfter": "This parameter limits the response to FHIR export jobs submitted after a user specified date.
", + "ListFHIRImportJobsRequest$SubmittedBefore": "This parameter limits the response to FHIR import jobs submitted before a user specified date.
", + "ListFHIRImportJobsRequest$SubmittedAfter": "This parameter limits the response to FHIR import jobs submitted after a user specified date.
" + } + }, + "UntagResourceRequest": { + "base": null, + "refs": { + } + }, + "UntagResourceResponse": { + "base": null, + "refs": { } }, "ValidationException": { diff --git a/models/apis/healthlake/2017-07-01/paginators-1.json b/models/apis/healthlake/2017-07-01/paginators-1.json index c134891ac25..d103dbc646c 100644 --- a/models/apis/healthlake/2017-07-01/paginators-1.json +++ b/models/apis/healthlake/2017-07-01/paginators-1.json @@ -4,6 +4,16 @@ "input_token": "NextToken", "output_token": "NextToken", "limit_key": "MaxResults" + }, + "ListFHIRExportJobs": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListFHIRImportJobs": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" } } } diff --git a/models/apis/lightsail/2016-11-28/api-2.json b/models/apis/lightsail/2016-11-28/api-2.json index e51983013d5..3e9cd0769fb 100644 --- a/models/apis/lightsail/2016-11-28/api-2.json +++ b/models/apis/lightsail/2016-11-28/api-2.json @@ -155,6 +155,37 @@ {"shape":"UnauthenticatedException"} ] }, + "CreateBucket":{ + "name":"CreateBucket", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateBucketRequest"}, + "output":{"shape":"CreateBucketResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidInputException"}, + {"shape":"ServiceException"}, + {"shape":"UnauthenticatedException"} + ] + }, + "CreateBucketAccessKey":{ + "name":"CreateBucketAccessKey", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateBucketAccessKeyRequest"}, + "output":{"shape":"CreateBucketAccessKeyResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"NotFoundException"}, + {"shape":"InvalidInputException"}, + {"shape":"ServiceException"}, + {"shape":"UnauthenticatedException"} + ] + }, "CreateCertificate":{ "name":"CreateCertificate", "http":{ @@ -557,6 +588,38 @@ {"shape":"UnauthenticatedException"} ] }, + "DeleteBucket":{ + 
"name":"DeleteBucket", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteBucketRequest"}, + "output":{"shape":"DeleteBucketResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidInputException"}, + {"shape":"NotFoundException"}, + {"shape":"ServiceException"}, + {"shape":"UnauthenticatedException"} + ] + }, + "DeleteBucketAccessKey":{ + "name":"DeleteBucketAccessKey", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteBucketAccessKeyRequest"}, + "output":{"shape":"DeleteBucketAccessKeyResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidInputException"}, + {"shape":"NotFoundException"}, + {"shape":"ServiceException"}, + {"shape":"UnauthenticatedException"} + ] + }, "DeleteCertificate":{ "name":"DeleteCertificate", "http":{ @@ -1066,6 +1129,69 @@ {"shape":"UnauthenticatedException"} ] }, + "GetBucketAccessKeys":{ + "name":"GetBucketAccessKeys", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetBucketAccessKeysRequest"}, + "output":{"shape":"GetBucketAccessKeysResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidInputException"}, + {"shape":"NotFoundException"}, + {"shape":"ServiceException"}, + {"shape":"UnauthenticatedException"} + ] + }, + "GetBucketBundles":{ + "name":"GetBucketBundles", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetBucketBundlesRequest"}, + "output":{"shape":"GetBucketBundlesResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidInputException"}, + {"shape":"ServiceException"}, + {"shape":"UnauthenticatedException"} + ] + }, + "GetBucketMetricData":{ + "name":"GetBucketMetricData", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetBucketMetricDataRequest"}, + "output":{"shape":"GetBucketMetricDataResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidInputException"}, + 
{"shape":"NotFoundException"}, + {"shape":"ServiceException"}, + {"shape":"UnauthenticatedException"} + ] + }, + "GetBuckets":{ + "name":"GetBuckets", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetBucketsRequest"}, + "output":{"shape":"GetBucketsResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidInputException"}, + {"shape":"NotFoundException"}, + {"shape":"ServiceException"}, + {"shape":"UnauthenticatedException"} + ] + }, "GetBundles":{ "name":"GetBundles", "http":{ @@ -2244,6 +2370,22 @@ {"shape":"UnauthenticatedException"} ] }, + "SetResourceAccessForBucket":{ + "name":"SetResourceAccessForBucket", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SetResourceAccessForBucketRequest"}, + "output":{"shape":"SetResourceAccessForBucketResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidInputException"}, + {"shape":"NotFoundException"}, + {"shape":"ServiceException"}, + {"shape":"UnauthenticatedException"} + ] + }, "StartInstance":{ "name":"StartInstance", "http":{ @@ -2387,6 +2529,38 @@ {"shape":"UnauthenticatedException"} ] }, + "UpdateBucket":{ + "name":"UpdateBucket", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateBucketRequest"}, + "output":{"shape":"UpdateBucketResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidInputException"}, + {"shape":"NotFoundException"}, + {"shape":"ServiceException"}, + {"shape":"UnauthenticatedException"} + ] + }, + "UpdateBucketBundle":{ + "name":"UpdateBucketBundle", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateBucketBundleRequest"}, + "output":{"shape":"UpdateBucketBundleResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidInputException"}, + {"shape":"NotFoundException"}, + {"shape":"ServiceException"}, + {"shape":"UnauthenticatedException"} + ] + }, "UpdateContainerService":{ 
"name":"UpdateContainerService", "http":{ @@ -2528,6 +2702,37 @@ "outbound" ] }, + "AccessKey":{ + "type":"structure", + "members":{ + "accessKeyId":{"shape":"IAMAccessKeyId"}, + "secretAccessKey":{"shape":"NonEmptyString"}, + "status":{"shape":"StatusType"}, + "createdAt":{"shape":"IsoDate"} + } + }, + "AccessKeyList":{ + "type":"list", + "member":{"shape":"AccessKey"} + }, + "AccessReceiverList":{ + "type":"list", + "member":{"shape":"ResourceReceivingAccess"} + }, + "AccessRules":{ + "type":"structure", + "members":{ + "getObject":{"shape":"AccessType"}, + "allowPublicOverrides":{"shape":"boolean"} + } + }, + "AccessType":{ + "type":"string", + "enum":[ + "public", + "private" + ] + }, "AccountSetupInProgressException":{ "type":"structure", "members":{ @@ -2799,6 +3004,65 @@ "app" ] }, + "Bucket":{ + "type":"structure", + "members":{ + "resourceType":{"shape":"NonEmptyString"}, + "accessRules":{"shape":"AccessRules"}, + "arn":{"shape":"NonEmptyString"}, + "bundleId":{"shape":"NonEmptyString"}, + "createdAt":{"shape":"IsoDate"}, + "url":{"shape":"NonEmptyString"}, + "location":{"shape":"ResourceLocation"}, + "name":{"shape":"BucketName"}, + "supportCode":{"shape":"NonEmptyString"}, + "tags":{"shape":"TagList"}, + "objectVersioning":{"shape":"NonEmptyString"}, + "ableToUpdateBundle":{"shape":"boolean"}, + "readonlyAccessAccounts":{"shape":"PartnerIdList"}, + "resourcesReceivingAccess":{"shape":"AccessReceiverList"}, + "state":{"shape":"BucketState"} + } + }, + "BucketBundle":{ + "type":"structure", + "members":{ + "bundleId":{"shape":"NonEmptyString"}, + "name":{"shape":"NonEmptyString"}, + "price":{"shape":"float"}, + "storagePerMonthInGb":{"shape":"integer"}, + "transferPerMonthInGb":{"shape":"integer"}, + "isActive":{"shape":"boolean"} + } + }, + "BucketBundleList":{ + "type":"list", + "member":{"shape":"BucketBundle"} + }, + "BucketList":{ + "type":"list", + "member":{"shape":"Bucket"} + }, + "BucketMetricName":{ + "type":"string", + "enum":[ + 
"BucketSizeBytes", + "NumberOfObjects" + ] + }, + "BucketName":{ + "type":"string", + "max":54, + "min":3, + "pattern":"^[a-z0-9][a-z0-9-]{1,52}[a-z0-9]$" + }, + "BucketState":{ + "type":"structure", + "members":{ + "code":{"shape":"NonEmptyString"}, + "message":{"shape":"string"} + } + }, "Bundle":{ "type":"structure", "members":{ @@ -3278,6 +3542,40 @@ "operations":{"shape":"OperationList"} } }, + "CreateBucketAccessKeyRequest":{ + "type":"structure", + "required":["bucketName"], + "members":{ + "bucketName":{"shape":"BucketName"} + } + }, + "CreateBucketAccessKeyResult":{ + "type":"structure", + "members":{ + "accessKey":{"shape":"AccessKey"}, + "operations":{"shape":"OperationList"} + } + }, + "CreateBucketRequest":{ + "type":"structure", + "required":[ + "bucketName", + "bundleId" + ], + "members":{ + "bucketName":{"shape":"BucketName"}, + "bundleId":{"shape":"NonEmptyString"}, + "tags":{"shape":"TagList"}, + "enableObjectVersioning":{"shape":"boolean"} + } + }, + "CreateBucketResult":{ + "type":"structure", + "members":{ + "bucket":{"shape":"Bucket"}, + "operations":{"shape":"OperationList"} + } + }, "CreateCertificateRequest":{ "type":"structure", "required":[ @@ -3731,6 +4029,37 @@ "operations":{"shape":"OperationList"} } }, + "DeleteBucketAccessKeyRequest":{ + "type":"structure", + "required":[ + "bucketName", + "accessKeyId" + ], + "members":{ + "bucketName":{"shape":"BucketName"}, + "accessKeyId":{"shape":"NonEmptyString"} + } + }, + "DeleteBucketAccessKeyResult":{ + "type":"structure", + "members":{ + "operations":{"shape":"OperationList"} + } + }, + "DeleteBucketRequest":{ + "type":"structure", + "required":["bucketName"], + "members":{ + "bucketName":{"shape":"BucketName"}, + "forceDelete":{"shape":"boolean"} + } + }, + "DeleteBucketResult":{ + "type":"structure", + "members":{ + "operations":{"shape":"OperationList"} + } + }, "DeleteCertificateRequest":{ "type":"structure", "required":["certificateName"], @@ -4400,6 +4729,74 @@ 
"nextPageToken":{"shape":"string"} } }, + "GetBucketAccessKeysRequest":{ + "type":"structure", + "required":["bucketName"], + "members":{ + "bucketName":{"shape":"BucketName"} + } + }, + "GetBucketAccessKeysResult":{ + "type":"structure", + "members":{ + "accessKeys":{"shape":"AccessKeyList"} + } + }, + "GetBucketBundlesRequest":{ + "type":"structure", + "members":{ + "includeInactive":{"shape":"boolean"} + } + }, + "GetBucketBundlesResult":{ + "type":"structure", + "members":{ + "bundles":{"shape":"BucketBundleList"} + } + }, + "GetBucketMetricDataRequest":{ + "type":"structure", + "required":[ + "bucketName", + "metricName", + "startTime", + "endTime", + "period", + "statistics", + "unit" + ], + "members":{ + "bucketName":{"shape":"BucketName"}, + "metricName":{"shape":"BucketMetricName"}, + "startTime":{"shape":"IsoDate"}, + "endTime":{"shape":"IsoDate"}, + "period":{"shape":"MetricPeriod"}, + "statistics":{"shape":"MetricStatisticList"}, + "unit":{"shape":"MetricUnit"} + } + }, + "GetBucketMetricDataResult":{ + "type":"structure", + "members":{ + "metricName":{"shape":"BucketMetricName"}, + "metricData":{"shape":"MetricDatapointList"} + } + }, + "GetBucketsRequest":{ + "type":"structure", + "members":{ + "bucketName":{"shape":"BucketName"}, + "pageToken":{"shape":"string"}, + "includeConnectedResources":{"shape":"boolean"} + } + }, + "GetBucketsResult":{ + "type":"structure", + "members":{ + "buckets":{"shape":"BucketList"}, + "nextPageToken":{"shape":"string"} + } + }, "GetBundlesRequest":{ "type":"structure", "members":{ @@ -5244,6 +5641,13 @@ "type":"list", "member":{"shape":"HostKeyAttributes"} }, + "IAMAccessKeyId":{ + "type":"string", + "max":20, + "min":20, + "pattern":"^[A-Z0-9]{20}$", + "sensitive":true + }, "ImportKeyPairRequest":{ "type":"structure", "required":[ @@ -6075,7 +6479,14 @@ "CreateContainerServiceDeployment", "CreateContainerServiceRegistryLogin", "RegisterContainerImage", - "DeleteContainerImage" + "DeleteContainerImage", + 
"CreateBucket", + "DeleteBucket", + "CreateBucketAccessKey", + "DeleteBucketAccessKey", + "UpdateBucketBundle", + "UpdateBucket", + "SetResourceAccessForBucket" ] }, "Origin":{ @@ -6094,6 +6505,11 @@ "https-only" ] }, + "PartnerIdList":{ + "type":"list", + "member":{"shape":"NonEmptyString"}, + "max":10 + }, "PasswordData":{ "type":"structure", "members":{ @@ -6303,7 +6719,8 @@ "ap-southeast-1", "ap-southeast-2", "ap-northeast-1", - "ap-northeast-2" + "ap-northeast-2", + "eu-north-1" ] }, "RegisterContainerImageRequest":{ @@ -6536,6 +6953,13 @@ "type":"string", "pattern":"^arn:(aws[^:]*):([a-zA-Z0-9-]+):([a-z0-9-]+):([0-9]+):([a-zA-Z]+)/([a-zA-Z0-9-]+)$" }, + "ResourceBucketAccess":{ + "type":"string", + "enum":[ + "allow", + "deny" + ] + }, "ResourceLocation":{ "type":"structure", "members":{ @@ -6551,6 +6975,13 @@ "type":"list", "member":{"shape":"ResourceName"} }, + "ResourceReceivingAccess":{ + "type":"structure", + "members":{ + "name":{"shape":"NonEmptyString"}, + "resourceType":{"shape":"NonEmptyString"} + } + }, "ResourceRecord":{ "type":"structure", "members":{ @@ -6580,7 +7011,8 @@ "Alarm", "ContactMethod", "Distribution", - "Certificate" + "Certificate", + "Bucket" ] }, "RevocationReason":{"type":"string"}, @@ -6632,6 +7064,25 @@ "operations":{"shape":"OperationList"} } }, + "SetResourceAccessForBucketRequest":{ + "type":"structure", + "required":[ + "resourceName", + "bucketName", + "access" + ], + "members":{ + "resourceName":{"shape":"ResourceName"}, + "bucketName":{"shape":"BucketName"}, + "access":{"shape":"ResourceBucketAccess"} + } + }, + "SetResourceAccessForBucketResult":{ + "type":"structure", + "members":{ + "operations":{"shape":"OperationList"} + } + }, "StartInstanceRequest":{ "type":"structure", "required":["instanceName"], @@ -6676,6 +7127,13 @@ "type":"list", "member":{"shape":"StaticIp"} }, + "StatusType":{ + "type":"string", + "enum":[ + "Active", + "Inactive" + ] + }, "StopInstanceRequest":{ "type":"structure", 
"required":["instanceName"], @@ -6821,6 +7279,40 @@ "operations":{"shape":"OperationList"} } }, + "UpdateBucketBundleRequest":{ + "type":"structure", + "required":[ + "bucketName", + "bundleId" + ], + "members":{ + "bucketName":{"shape":"BucketName"}, + "bundleId":{"shape":"NonEmptyString"} + } + }, + "UpdateBucketBundleResult":{ + "type":"structure", + "members":{ + "operations":{"shape":"OperationList"} + } + }, + "UpdateBucketRequest":{ + "type":"structure", + "required":["bucketName"], + "members":{ + "bucketName":{"shape":"BucketName"}, + "accessRules":{"shape":"AccessRules"}, + "versioning":{"shape":"NonEmptyString"}, + "readonlyAccessAccounts":{"shape":"PartnerIdList"} + } + }, + "UpdateBucketResult":{ + "type":"structure", + "members":{ + "bucket":{"shape":"Bucket"}, + "operations":{"shape":"OperationList"} + } + }, "UpdateContainerServiceRequest":{ "type":"structure", "required":["serviceName"], diff --git a/models/apis/lightsail/2016-11-28/docs-2.json b/models/apis/lightsail/2016-11-28/docs-2.json index ac31114352c..db747187432 100644 --- a/models/apis/lightsail/2016-11-28/docs-2.json +++ b/models/apis/lightsail/2016-11-28/docs-2.json @@ -1,67 +1,75 @@ { "version": "2.0", - "service": "Amazon Lightsail is the easiest way to get started with Amazon Web Services (AWS) for developers who need to build websites or web applications. It includes everything you need to launch your project quickly - instances (virtual private servers), container services, managed databases, SSD-based block storage, static IP addresses, load balancers, content delivery network (CDN) distributions, DNS management of registered domains, and resource snapshots (backups) - for a low, predictable monthly price.
You can manage your Lightsail resources using the Lightsail console, Lightsail API, AWS Command Line Interface (AWS CLI), or SDKs. For more information about Lightsail concepts and tasks, see the Lightsail Dev Guide.
This API Reference provides detailed information about the actions, data types, parameters, and errors of the Lightsail service. For more information about the supported AWS Regions, endpoints, and service quotas of the Lightsail service, see Amazon Lightsail Endpoints and Quotas in the AWS General Reference.
", + "service": "Amazon Lightsail is the easiest way to get started with Amazon Web Services (AWS) for developers who need to build websites or web applications. It includes everything you need to launch your project quickly - instances (virtual private servers), container services, storage buckets, managed databases, SSD-based block storage, static IP addresses, load balancers, content delivery network (CDN) distributions, DNS management of registered domains, and resource snapshots (backups) - for a low, predictable monthly price.
You can manage your Lightsail resources using the Lightsail console, Lightsail API, AWS Command Line Interface (AWS CLI), or SDKs. For more information about Lightsail concepts and tasks, see the Amazon Lightsail Developer Guide.
This API Reference provides detailed information about the actions, data types, parameters, and errors of the Lightsail service. For more information about the supported AWS Regions, endpoints, and service quotas of the Lightsail service, see Amazon Lightsail Endpoints and Quotas in the AWS General Reference.
", "operations": { "AllocateStaticIp": "Allocates a static IP address.
", "AttachCertificateToDistribution": "Attaches an SSL/TLS certificate to your Amazon Lightsail content delivery network (CDN) distribution.
After the certificate is attached, your distribution accepts HTTPS traffic for all of the domains that are associated with the certificate.
Use the CreateCertificate
action to create a certificate that you can attach to your distribution.
Only certificates created in the us-east-1
AWS Region can be attached to Lightsail distributions. Lightsail distributions are global resources that can reference an origin in any AWS Region, and distribute its content globally. However, all distributions are located in the us-east-1
Region.
Attaches a block storage disk to a running or stopped Lightsail instance and exposes it to the instance with the specified disk name.
The attach disk
operation supports tag-based access control via resource tags applied to the resource identified by disk name
. For more information, see the Lightsail Dev Guide.
Attaches one or more Lightsail instances to a load balancer.
After some time, the instances are attached to the load balancer and the health check status is available.
The attach instances to load balancer
operation supports tag-based access control via resource tags applied to the resource identified by load balancer name
. For more information, see the Lightsail Dev Guide.
Attaches a Transport Layer Security (TLS) certificate to your load balancer. TLS is just an updated, more secure version of Secure Socket Layer (SSL).
Once you create and validate your certificate, you can attach it to your load balancer. You can also use this API to rotate the certificates on your account. Use the AttachLoadBalancerTlsCertificate
action with the non-attached certificate, and it will replace the existing one and become the attached certificate.
The AttachLoadBalancerTlsCertificate
operation supports tag-based access control via resource tags applied to the resource identified by load balancer name
. For more information, see the Lightsail Dev Guide.
Attaches a block storage disk to a running or stopped Lightsail instance and exposes it to the instance with the specified disk name.
The attach disk
operation supports tag-based access control via resource tags applied to the resource identified by disk name
. For more information, see the Amazon Lightsail Developer Guide.
Attaches one or more Lightsail instances to a load balancer.
After some time, the instances are attached to the load balancer and the health check status is available.
The attach instances to load balancer
operation supports tag-based access control via resource tags applied to the resource identified by load balancer name
. For more information, see the Lightsail Developer Guide.
Attaches a Transport Layer Security (TLS) certificate to your load balancer. TLS is just an updated, more secure version of Secure Socket Layer (SSL).
Once you create and validate your certificate, you can attach it to your load balancer. You can also use this API to rotate the certificates on your account. Use the AttachLoadBalancerTlsCertificate
action with the non-attached certificate, and it will replace the existing one and become the attached certificate.
The AttachLoadBalancerTlsCertificate
operation supports tag-based access control via resource tags applied to the resource identified by load balancer name
. For more information, see the Amazon Lightsail Developer Guide.
Attaches a static IP address to a specific Amazon Lightsail instance.
", - "CloseInstancePublicPorts": "Closes ports for a specific Amazon Lightsail instance.
The CloseInstancePublicPorts
action supports tag-based access control via resource tags applied to the resource identified by instanceName
. For more information, see the Lightsail Dev Guide.
Closes ports for a specific Amazon Lightsail instance.
The CloseInstancePublicPorts
action supports tag-based access control via resource tags applied to the resource identified by instanceName
. For more information, see the Amazon Lightsail Developer Guide.
Copies a manual snapshot of an instance or disk as another manual snapshot, or copies an automatic snapshot of an instance or disk as a manual snapshot. This operation can also be used to copy a manual or automatic snapshot of an instance or a disk from one AWS Region to another in Amazon Lightsail.
When copying a manual snapshot, be sure to define the source region
, source snapshot name
, and target snapshot name
parameters.
When copying an automatic snapshot, be sure to define the source region
, source resource name
, target snapshot name
, and either the restore date
or the use latest restorable auto snapshot
parameters.
Creates an Amazon Lightsail bucket.
A bucket is a cloud storage resource available in the Lightsail object storage service. Use buckets to store objects such as data and its descriptive metadata. For more information about buckets, see Buckets in Amazon Lightsail in the Amazon Lightsail Developer Guide.
", + "CreateBucketAccessKey": "Creates a new access key for the specified Amazon Lightsail bucket. Access keys consist of an access key ID and corresponding secret access key.
Access keys grant full programmatic access to the specified bucket and its objects. You can have a maximum of two access keys per bucket. Use the GetBucketAccessKeys action to get a list of current access keys for a specific bucket. For more information about access keys, see Creating access keys for a bucket in Amazon Lightsail in the Amazon Lightsail Developer Guide.
The secretAccessKey
value is returned only in response to the CreateBucketAccessKey
action. You can get a secret access key only when you first create an access key; you cannot get the secret access key later. If you lose the secret access key, you must create a new access key.
Creates an SSL/TLS certificate for an Amazon Lightsail content delivery network (CDN) distribution and a container service.
After the certificate is valid, use the AttachCertificateToDistribution
action to use the certificate and its domains with your distribution. Or use the UpdateContainerService
action to use the certificate and its domains with your container service.
Only certificates created in the us-east-1
AWS Region can be attached to Lightsail distributions. Lightsail distributions are global resources that can reference an origin in any AWS Region, and distribute its content globally. However, all distributions are located in the us-east-1
Region.
Creates an AWS CloudFormation stack, which creates a new Amazon EC2 instance from an exported Amazon Lightsail snapshot. This operation results in a CloudFormation stack record that can be used to track the AWS CloudFormation stack created. Use the get cloud formation stack records
operation to get a list of the CloudFormation stacks created.
Wait until after your new Amazon EC2 instance is created before running the create cloud formation stack
operation again with the same export snapshot record.
Creates an email or SMS text message contact method.
A contact method is used to send you notifications about your Amazon Lightsail resources. You can add one email address and one mobile phone number contact method in each AWS Region. However, SMS text messaging is not supported in some AWS Regions, and SMS text messages cannot be sent to some countries/regions. For more information, see Notifications in Amazon Lightsail.
", "CreateContainerService": "Creates an Amazon Lightsail container service.
A Lightsail container service is a compute resource to which you can deploy containers. For more information, see Container services in Amazon Lightsail in the Lightsail Dev Guide.
", - "CreateContainerServiceDeployment": "Creates a deployment for your Amazon Lightsail container service.
A deployment specifies the containers that will be launched on the container service and their settings, such as the ports to open, the environment variables to apply, and the launch command to run. It also specifies the container that will serve as the public endpoint of the deployment and its settings, such as the HTTP or HTTPS port to use, and the health check configuration.
You can deploy containers to your container service using container images from a public registry like Docker Hub, or from your local machine. For more information, see Creating container images for your Amazon Lightsail container services in the Lightsail Dev Guide.
", - "CreateContainerServiceRegistryLogin": "Creates a temporary set of log in credentials that you can use to log in to the Docker process on your local machine. After you're logged in, you can use the native Docker commands to push your local container images to the container image registry of your Amazon Lightsail account so that you can use them with your Lightsail container service. The log in credentials expire 12 hours after they are created, at which point you will need to create a new set of log in credentials.
You can only push container images to the container service registry of your Lightsail account. You cannot pull container images or perform any other container image management actions on the container service registry.
After you push your container images to the container image registry of your Lightsail account, use the RegisterContainerImage
action to register the pushed images to a specific Lightsail container service.
This action is not required if you install and use the Lightsail Control (lightsailctl) plugin to push container images to your Lightsail container service. For more information, see Pushing and managing container images on your Amazon Lightsail container services in the Lightsail Dev Guide.
Creates a block storage disk that can be attached to an Amazon Lightsail instance in the same Availability Zone (e.g., us-east-2a
).
The create disk
operation supports tag-based access control via request tags. For more information, see the Lightsail Dev Guide.
Creates a block storage disk from a manual or automatic snapshot of a disk. The resulting disk can be attached to an Amazon Lightsail instance in the same Availability Zone (e.g., us-east-2a
).
The create disk from snapshot
operation supports tag-based access control via request tags and resource tags applied to the resource identified by disk snapshot name
. For more information, see the Lightsail Dev Guide.
Creates a snapshot of a block storage disk. You can use snapshots for backups, to make copies of disks, and to save data before shutting down a Lightsail instance.
You can take a snapshot of an attached disk that is in use; however, snapshots only capture data that has been written to your disk at the time the snapshot command is issued. This may exclude any data that has been cached by any applications or the operating system. If you can pause any file systems on the disk long enough to take a snapshot, your snapshot should be complete. Nevertheless, if you cannot pause all file writes to the disk, you should unmount the disk from within the Lightsail instance, issue the create disk snapshot command, and then remount the disk to ensure a consistent and complete snapshot. You may remount and use your disk while the snapshot status is pending.
You can also use this operation to create a snapshot of an instance's system volume. You might want to do this, for example, to recover data from the system volume of a botched instance or to create a backup of the system volume like you would for a block storage disk. To create a snapshot of a system volume, just define the instance name
parameter when issuing the snapshot command, and a snapshot of the defined instance's system volume will be created. After the snapshot is available, you can create a block storage disk from the snapshot and attach it to a running instance to access the data on the disk.
The create disk snapshot
operation supports tag-based access control via request tags. For more information, see the Lightsail Dev Guide.
Creates a deployment for your Amazon Lightsail container service.
A deployment specifies the containers that will be launched on the container service and their settings, such as the ports to open, the environment variables to apply, and the launch command to run. It also specifies the container that will serve as the public endpoint of the deployment and its settings, such as the HTTP or HTTPS port to use, and the health check configuration.
You can deploy containers to your container service using container images from a public registry like Docker Hub, or from your local machine. For more information, see Creating container images for your Amazon Lightsail container services in the Amazon Lightsail Developer Guide.
", + "CreateContainerServiceRegistryLogin": "Creates a temporary set of log in credentials that you can use to log in to the Docker process on your local machine. After you're logged in, you can use the native Docker commands to push your local container images to the container image registry of your Amazon Lightsail account so that you can use them with your Lightsail container service. The log in credentials expire 12 hours after they are created, at which point you will need to create a new set of log in credentials.
You can only push container images to the container service registry of your Lightsail account. You cannot pull container images or perform any other container image management actions on the container service registry.
After you push your container images to the container image registry of your Lightsail account, use the RegisterContainerImage
action to register the pushed images to a specific Lightsail container service.
This action is not required if you install and use the Lightsail Control (lightsailctl) plugin to push container images to your Lightsail container service. For more information, see Pushing and managing container images on your Amazon Lightsail container services in the Amazon Lightsail Developer Guide.
Creates a block storage disk that can be attached to an Amazon Lightsail instance in the same Availability Zone (e.g., us-east-2a
).
The create disk
operation supports tag-based access control via request tags. For more information, see the Amazon Lightsail Developer Guide.
Creates a block storage disk from a manual or automatic snapshot of a disk. The resulting disk can be attached to an Amazon Lightsail instance in the same Availability Zone (e.g., us-east-2a
).
The create disk from snapshot
operation supports tag-based access control via request tags and resource tags applied to the resource identified by disk snapshot name
. For more information, see the Amazon Lightsail Developer Guide.
Creates a snapshot of a block storage disk. You can use snapshots for backups, to make copies of disks, and to save data before shutting down a Lightsail instance.
You can take a snapshot of an attached disk that is in use; however, snapshots only capture data that has been written to your disk at the time the snapshot command is issued. This may exclude any data that has been cached by any applications or the operating system. If you can pause any file systems on the disk long enough to take a snapshot, your snapshot should be complete. Nevertheless, if you cannot pause all file writes to the disk, you should unmount the disk from within the Lightsail instance, issue the create disk snapshot command, and then remount the disk to ensure a consistent and complete snapshot. You may remount and use your disk while the snapshot status is pending.
You can also use this operation to create a snapshot of an instance's system volume. You might want to do this, for example, to recover data from the system volume of a botched instance or to create a backup of the system volume like you would for a block storage disk. To create a snapshot of a system volume, just define the instance name
parameter when issuing the snapshot command, and a snapshot of the defined instance's system volume will be created. After the snapshot is available, you can create a block storage disk from the snapshot and attach it to a running instance to access the data on the disk.
The create disk snapshot
operation supports tag-based access control via request tags. For more information, see the Amazon Lightsail Developer Guide.
Creates an Amazon Lightsail content delivery network (CDN) distribution.
A distribution is a globally distributed network of caching servers that improve the performance of your website or web application hosted on a Lightsail instance. For more information, see Content delivery networks in Amazon Lightsail.
", - "CreateDomain": "Creates a domain resource for the specified domain (e.g., example.com).
The create domain
operation supports tag-based access control via request tags. For more information, see the Lightsail Dev Guide.
Creates one of the following domain name system (DNS) records in a domain DNS zone: Address (A), canonical name (CNAME), mail exchanger (MX), name server (NS), start of authority (SOA), service locator (SRV), or text (TXT).
The create domain entry
operation supports tag-based access control via resource tags applied to the resource identified by domain name
. For more information, see the Lightsail Dev Guide.
Creates a snapshot of a specific virtual private server, or instance. You can use a snapshot to create a new instance that is based on that snapshot.
The create instance snapshot
operation supports tag-based access control via request tags. For more information, see the Lightsail Dev Guide.
Creates one or more Amazon Lightsail instances.
The create instances
operation supports tag-based access control via request tags. For more information, see the Lightsail Dev Guide.
Creates one or more new instances from a manual or automatic snapshot of an instance.
The create instances from snapshot
operation supports tag-based access control via request tags and resource tags applied to the resource identified by instance snapshot name
. For more information, see the Lightsail Dev Guide.
Creates an SSH key pair.
The create key pair
operation supports tag-based access control via request tags. For more information, see the Lightsail Dev Guide.
Creates a Lightsail load balancer. To learn more about deciding whether to load balance your application, see Configure your Lightsail instances for load balancing. You can create up to 5 load balancers per AWS Region in your account.
When you create a load balancer, you can specify a unique name and port settings. To change additional load balancer settings, use the UpdateLoadBalancerAttribute
operation.
The create load balancer
operation supports tag-based access control via request tags. For more information, see the Lightsail Dev Guide.
Creates an SSL/TLS certificate for an Amazon Lightsail load balancer.
TLS is just an updated, more secure version of Secure Socket Layer (SSL).
The CreateLoadBalancerTlsCertificate
operation supports tag-based access control via resource tags applied to the resource identified by load balancer name
. For more information, see the Lightsail Dev Guide.
Creates a new database in Amazon Lightsail.
The create relational database
operation supports tag-based access control via request tags. For more information, see the Lightsail Dev Guide.
Creates a new database from an existing database snapshot in Amazon Lightsail.
You can create a new database from a snapshot if something goes wrong with your original database, or to change it to a different plan, such as a high availability or standard plan.
The create relational database from snapshot
operation supports tag-based access control via request tags and resource tags applied to the resource identified by relationalDatabaseSnapshotName. For more information, see the Lightsail Dev Guide.
Creates a snapshot of your database in Amazon Lightsail. You can use snapshots for backups, to make copies of a database, and to save data before deleting a database.
The create relational database snapshot
operation supports tag-based access control via request tags. For more information, see the Lightsail Dev Guide.
Creates a domain resource for the specified domain (e.g., example.com).
The create domain
operation supports tag-based access control via request tags. For more information, see the Amazon Lightsail Developer Guide.
Creates one of the following domain name system (DNS) records in a domain DNS zone: Address (A), canonical name (CNAME), mail exchanger (MX), name server (NS), start of authority (SOA), service locator (SRV), or text (TXT).
The create domain entry
operation supports tag-based access control via resource tags applied to the resource identified by domain name
. For more information, see the Amazon Lightsail Developer Guide.
Creates a snapshot of a specific virtual private server, or instance. You can use a snapshot to create a new instance that is based on that snapshot.
The create instance snapshot
operation supports tag-based access control via request tags. For more information, see the Amazon Lightsail Developer Guide.
Creates one or more Amazon Lightsail instances.
The create instances
operation supports tag-based access control via request tags. For more information, see the Lightsail Developer Guide.
Creates one or more new instances from a manual or automatic snapshot of an instance.
The create instances from snapshot
operation supports tag-based access control via request tags and resource tags applied to the resource identified by instance snapshot name
. For more information, see the Amazon Lightsail Developer Guide.
Creates an SSH key pair.
The create key pair
operation supports tag-based access control via request tags. For more information, see the Amazon Lightsail Developer Guide.
Creates a Lightsail load balancer. To learn more about deciding whether to load balance your application, see Configure your Lightsail instances for load balancing. You can create up to 5 load balancers per AWS Region in your account.
When you create a load balancer, you can specify a unique name and port settings. To change additional load balancer settings, use the UpdateLoadBalancerAttribute
operation.
The create load balancer
operation supports tag-based access control via request tags. For more information, see the Amazon Lightsail Developer Guide.
Creates an SSL/TLS certificate for an Amazon Lightsail load balancer.
TLS is just an updated, more secure version of Secure Socket Layer (SSL).
The CreateLoadBalancerTlsCertificate
operation supports tag-based access control via resource tags applied to the resource identified by load balancer name
. For more information, see the Amazon Lightsail Developer Guide.
Creates a new database in Amazon Lightsail.
The create relational database
operation supports tag-based access control via request tags. For more information, see the Amazon Lightsail Developer Guide.
Creates a new database from an existing database snapshot in Amazon Lightsail.
You can create a new database from a snapshot if something goes wrong with your original database, or to change it to a different plan, such as a high availability or standard plan.
The create relational database from snapshot
operation supports tag-based access control via request tags and resource tags applied to the resource identified by relationalDatabaseSnapshotName. For more information, see the Amazon Lightsail Developer Guide.
Creates a snapshot of your database in Amazon Lightsail. You can use snapshots for backups, to make copies of a database, and to save data before deleting a database.
The create relational database snapshot
operation supports tag-based access control via request tags. For more information, see the Amazon Lightsail Developer Guide.
Deletes an alarm.
An alarm is used to monitor a single metric for one of your resources. When a metric condition is met, the alarm can notify you by email, SMS text message, and a banner displayed on the Amazon Lightsail console. For more information, see Alarms in Amazon Lightsail.
", - "DeleteAutoSnapshot": "Deletes an automatic snapshot of an instance or disk. For more information, see the Lightsail Dev Guide.
", + "DeleteAutoSnapshot": "Deletes an automatic snapshot of an instance or disk. For more information, see the Amazon Lightsail Developer Guide.
", + "DeleteBucket": "Deletes a Amazon Lightsail bucket.
When you delete your bucket, the bucket name is released and can be reused for a new bucket in your account or another AWS account.
Deletes an access key for the specified Amazon Lightsail bucket.
We recommend that you delete an access key if the secret access key is compromised.
For more information about access keys, see Creating access keys for a bucket in Amazon Lightsail in the Amazon Lightsail Developer Guide.
", "DeleteCertificate": "Deletes an SSL/TLS certificate for your Amazon Lightsail content delivery network (CDN) distribution.
Certificates that are currently attached to a distribution cannot be deleted. Use the DetachCertificateFromDistribution
action to detach a certificate from a distribution.
Deletes a contact method.
A contact method is used to send you notifications about your Amazon Lightsail resources. You can add one email address and one mobile phone number contact method in each AWS Region. However, SMS text messaging is not supported in some AWS Regions, and SMS text messages cannot be sent to some countries/regions. For more information, see Notifications in Amazon Lightsail.
", "DeleteContainerImage": "Deletes a container image that is registered to your Amazon Lightsail container service.
", "DeleteContainerService": "Deletes your Amazon Lightsail container service.
", - "DeleteDisk": "Deletes the specified block storage disk. The disk must be in the available
state (not attached to a Lightsail instance).
The disk may remain in the deleting
state for several minutes.
The delete disk
operation supports tag-based access control via resource tags applied to the resource identified by disk name
. For more information, see the Lightsail Dev Guide.
Deletes the specified disk snapshot.
When you make periodic snapshots of a disk, the snapshots are incremental, and only the blocks on the device that have changed since your last snapshot are saved in the new snapshot. When you delete a snapshot, only the data not needed for any other snapshot is removed. So regardless of which prior snapshots have been deleted, all active snapshots will have access to all the information needed to restore the disk.
The delete disk snapshot
operation supports tag-based access control via resource tags applied to the resource identified by disk snapshot name
. For more information, see the Lightsail Dev Guide.
Deletes the specified block storage disk. The disk must be in the available
state (not attached to a Lightsail instance).
The disk may remain in the deleting
state for several minutes.
The delete disk
operation supports tag-based access control via resource tags applied to the resource identified by disk name
. For more information, see the Amazon Lightsail Developer Guide.
Deletes the specified disk snapshot.
When you make periodic snapshots of a disk, the snapshots are incremental, and only the blocks on the device that have changed since your last snapshot are saved in the new snapshot. When you delete a snapshot, only the data not needed for any other snapshot is removed. So regardless of which prior snapshots have been deleted, all active snapshots will have access to all the information needed to restore the disk.
The delete disk snapshot
operation supports tag-based access control via resource tags applied to the resource identified by disk snapshot name
. For more information, see the Amazon Lightsail Developer Guide.
Deletes your Amazon Lightsail content delivery network (CDN) distribution.
", - "DeleteDomain": "Deletes the specified domain recordset and all of its domain records.
The delete domain
operation supports tag-based access control via resource tags applied to the resource identified by domain name
. For more information, see the Lightsail Dev Guide.
Deletes a specific domain entry.
The delete domain entry
operation supports tag-based access control via resource tags applied to the resource identified by domain name
. For more information, see the Lightsail Dev Guide.
Deletes an Amazon Lightsail instance.
The delete instance
operation supports tag-based access control via resource tags applied to the resource identified by instance name
. For more information, see the Lightsail Dev Guide.
Deletes a specific snapshot of a virtual private server (or instance).
The delete instance snapshot
operation supports tag-based access control via resource tags applied to the resource identified by instance snapshot name
. For more information, see the Lightsail Dev Guide.
Deletes a specific SSH key pair.
The delete key pair
operation supports tag-based access control via resource tags applied to the resource identified by key pair name
. For more information, see the Lightsail Dev Guide.
Deletes the known host key or certificate used by the Amazon Lightsail browser-based SSH or RDP clients to authenticate an instance. This operation enables the Lightsail browser-based SSH or RDP clients to connect to the instance after a host key mismatch.
Perform this operation only if you were expecting the host key or certificate mismatch or if you are familiar with the new host key or certificate on the instance. For more information, see Troubleshooting connection issues when using the Amazon Lightsail browser-based SSH or RDP client.
Deletes a Lightsail load balancer and all its associated SSL/TLS certificates. Once the load balancer is deleted, you will need to create a new load balancer, create a new certificate, and verify domain ownership again.
The delete load balancer
operation supports tag-based access control via resource tags applied to the resource identified by load balancer name
. For more information, see the Lightsail Dev Guide.
Deletes an SSL/TLS certificate associated with a Lightsail load balancer.
The DeleteLoadBalancerTlsCertificate
operation supports tag-based access control via resource tags applied to the resource identified by load balancer name
. For more information, see the Lightsail Dev Guide.
Deletes a database in Amazon Lightsail.
The delete relational database
operation supports tag-based access control via resource tags applied to the resource identified by relationalDatabaseName. For more information, see the Lightsail Dev Guide.
Deletes a database snapshot in Amazon Lightsail.
The delete relational database snapshot
operation supports tag-based access control via resource tags applied to the resource identified by relationalDatabaseName. For more information, see the Lightsail Dev Guide.
Deletes the specified domain recordset and all of its domain records.
The delete domain
operation supports tag-based access control via resource tags applied to the resource identified by domain name
. For more information, see the Amazon Lightsail Developer Guide.
Deletes a specific domain entry.
The delete domain entry
operation supports tag-based access control via resource tags applied to the resource identified by domain name
. For more information, see the Amazon Lightsail Developer Guide.
Deletes an Amazon Lightsail instance.
The delete instance
operation supports tag-based access control via resource tags applied to the resource identified by instance name
. For more information, see the Amazon Lightsail Developer Guide.
Deletes a specific snapshot of a virtual private server (or instance).
The delete instance snapshot
operation supports tag-based access control via resource tags applied to the resource identified by instance snapshot name
. For more information, see the Amazon Lightsail Developer Guide.
Deletes a specific SSH key pair.
The delete key pair
operation supports tag-based access control via resource tags applied to the resource identified by key pair name
. For more information, see the Amazon Lightsail Developer Guide.
Deletes the known host key or certificate used by the Amazon Lightsail browser-based SSH or RDP clients to authenticate an instance. This operation enables the Lightsail browser-based SSH or RDP clients to connect to the instance after a host key mismatch.
Perform this operation only if you were expecting the host key or certificate mismatch or if you are familiar with the new host key or certificate on the instance. For more information, see Troubleshooting connection issues when using the Amazon Lightsail browser-based SSH or RDP client.
Deletes a Lightsail load balancer and all its associated SSL/TLS certificates. Once the load balancer is deleted, you will need to create a new load balancer, create a new certificate, and verify domain ownership again.
The delete load balancer
operation supports tag-based access control via resource tags applied to the resource identified by load balancer name
. For more information, see the Amazon Lightsail Developer Guide.
Deletes an SSL/TLS certificate associated with a Lightsail load balancer.
The DeleteLoadBalancerTlsCertificate
operation supports tag-based access control via resource tags applied to the resource identified by load balancer name
. For more information, see the Amazon Lightsail Developer Guide.
Deletes a database in Amazon Lightsail.
The delete relational database
operation supports tag-based access control via resource tags applied to the resource identified by relationalDatabaseName. For more information, see the Amazon Lightsail Developer Guide.
Deletes a database snapshot in Amazon Lightsail.
The delete relational database snapshot
operation supports tag-based access control via resource tags applied to the resource identified by relationalDatabaseName. For more information, see the Amazon Lightsail Developer Guide.
Detaches an SSL/TLS certificate from your Amazon Lightsail content delivery network (CDN) distribution.
After the certificate is detached, your distribution stops accepting traffic for all of the domains that are associated with the certificate.
", - "DetachDisk": "Detaches a stopped block storage disk from a Lightsail instance. Make sure to unmount any file systems on the device within your operating system before stopping the instance and detaching the disk.
The detach disk
operation supports tag-based access control via resource tags applied to the resource identified by disk name
. For more information, see the Lightsail Dev Guide.
Detaches the specified instances from a Lightsail load balancer.
This operation waits until the instances are no longer needed before they are detached from the load balancer.
The detach instances from load balancer
operation supports tag-based access control via resource tags applied to the resource identified by load balancer name
. For more information, see the Lightsail Dev Guide.
Detaches a stopped block storage disk from a Lightsail instance. Make sure to unmount any file systems on the device within your operating system before stopping the instance and detaching the disk.
The detach disk
operation supports tag-based access control via resource tags applied to the resource identified by disk name
. For more information, see the Amazon Lightsail Developer Guide.
Detaches the specified instances from a Lightsail load balancer.
This operation waits until the instances are no longer needed before they are detached from the load balancer.
The detach instances from load balancer
operation supports tag-based access control via resource tags applied to the resource identified by load balancer name
. For more information, see the Amazon Lightsail Developer Guide.
Detaches a static IP from the Amazon Lightsail instance to which it is attached.
", - "DisableAddOn": "Disables an add-on for an Amazon Lightsail resource. For more information, see the Lightsail Dev Guide.
", + "DisableAddOn": "Disables an add-on for an Amazon Lightsail resource. For more information, see the Amazon Lightsail Developer Guide.
", "DownloadDefaultKeyPair": "Downloads the default SSH key pair from the user's account.
", - "EnableAddOn": "Enables or modifies an add-on for an Amazon Lightsail resource. For more information, see the Lightsail Dev Guide.
", - "ExportSnapshot": "Exports an Amazon Lightsail instance or block storage disk snapshot to Amazon Elastic Compute Cloud (Amazon EC2). This operation results in an export snapshot record that can be used with the create cloud formation stack
operation to create new Amazon EC2 instances.
Exported instance snapshots appear in Amazon EC2 as Amazon Machine Images (AMIs), and the instance system disk appears as an Amazon Elastic Block Store (Amazon EBS) volume. Exported disk snapshots appear in Amazon EC2 as Amazon EBS volumes. Snapshots are exported to the same Amazon Web Services Region in Amazon EC2 as the source Lightsail snapshot.
The export snapshot
operation supports tag-based access control via resource tags applied to the resource identified by source snapshot name
. For more information, see the Lightsail Dev Guide.
Use the get instance snapshots
or get disk snapshots
operations to get a list of snapshots that you can export to Amazon EC2.
Enables or modifies an add-on for an Amazon Lightsail resource. For more information, see the Amazon Lightsail Developer Guide.
", + "ExportSnapshot": "Exports an Amazon Lightsail instance or block storage disk snapshot to Amazon Elastic Compute Cloud (Amazon EC2). This operation results in an export snapshot record that can be used with the create cloud formation stack
operation to create new Amazon EC2 instances.
Exported instance snapshots appear in Amazon EC2 as Amazon Machine Images (AMIs), and the instance system disk appears as an Amazon Elastic Block Store (Amazon EBS) volume. Exported disk snapshots appear in Amazon EC2 as Amazon EBS volumes. Snapshots are exported to the same Amazon Web Services Region in Amazon EC2 as the source Lightsail snapshot.
The export snapshot
operation supports tag-based access control via resource tags applied to the resource identified by source snapshot name
. For more information, see the Amazon Lightsail Developer Guide.
Use the get instance snapshots
or get disk snapshots
operations to get a list of snapshots that you can export to Amazon EC2.
Returns the names of all active (not deleted) resources.
", "GetAlarms": "Returns information about the configured alarms. Specify an alarm name in your request to return information about a specific alarm, or specify a monitored resource name to return information about all alarms for a specific resource.
An alarm is used to monitor a single metric for one of your resources. When a metric condition is met, the alarm can notify you by email, SMS text message, and a banner displayed on the Amazon Lightsail console. For more information, see Alarms in Amazon Lightsail.
", - "GetAutoSnapshots": "Returns the available automatic snapshots for an instance or disk. For more information, see the Lightsail Dev Guide.
", + "GetAutoSnapshots": "Returns the available automatic snapshots for an instance or disk. For more information, see the Amazon Lightsail Developer Guide.
", "GetBlueprints": "Returns the list of available instance images, or blueprints. You can use a blueprint to create a new instance already running a specific operating system, as well as a preinstalled app or development stack. The software each instance is running depends on the blueprint image you choose.
Use active blueprints when creating new instances. Inactive blueprints are listed to support customers with existing instances and are not necessarily available to create new instances. Blueprints are marked inactive when they become outdated due to operating system updates or new application releases.
Returns the existing access key IDs for the specified Amazon Lightsail bucket.
This action does not return the secret access key value of an access key. You can get a secret access key only when you create it from the response of the CreateBucketAccessKey action. If you lose the secret access key, you must create a new access key.
Returns the bundles that you can apply to an Amazon Lightsail bucket.
The bucket bundle specifies the monthly cost, storage quota, and data transfer quota for a bucket.
Use the UpdateBucketBundle action to update the bundle for a bucket.
", + "GetBucketMetricData": "Returns the data points of a specific metric for an Amazon Lightsail bucket.
Metrics report the utilization of a bucket. View and collect metric data regularly to monitor the number of objects stored in a bucket (including object versions) and the storage space used by those objects.
", + "GetBuckets": "Returns information about one or more Amazon Lightsail buckets.
For more information about buckets, see Buckets in Amazon Lightsail in the Amazon Lightsail Developer Guide.
", "GetBundles": "Returns the list of bundles that are available for purchase. A bundle describes the specs for your virtual private server (or instance).
", "GetCertificates": "Returns information about one or more Amazon Lightsail SSL/TLS certificates.
To get a summary of a certificate, omit includeCertificateDetails
from your request. The response will include only the certificate Amazon Resource Name (ARN), certificate name, domain name, and tags.
Returns the CloudFormation stack record created as a result of the create cloud formation stack
operation.
An AWS CloudFormation stack is used to create a new Amazon EC2 instance from an exported Lightsail snapshot.
", @@ -77,15 +85,15 @@ "GetDiskSnapshot": "Returns information about a specific block storage disk snapshot.
", "GetDiskSnapshots": "Returns information about all block storage disk snapshots in your AWS account and region.
", "GetDisks": "Returns information about all block storage disks in your AWS account and region.
", - "GetDistributionBundles": "Returns the list bundles that can be applied to you Amazon Lightsail content delivery network (CDN) distributions.
A distribution bundle specifies the monthly network transfer quota and monthly cost of your dsitribution.
", + "GetDistributionBundles": "Returns the bundles that can be applied to your Amazon Lightsail content delivery network (CDN) distributions.
A distribution bundle specifies the monthly network transfer quota and monthly cost of your dsitribution.
", "GetDistributionLatestCacheReset": "Returns the timestamp and status of the last cache reset of a specific Amazon Lightsail content delivery network (CDN) distribution.
", "GetDistributionMetricData": "Returns the data points of a specific metric for an Amazon Lightsail content delivery network (CDN) distribution.
Metrics report the utilization of your resources, and the error counts generated by them. Monitor and collect metric data regularly to maintain the reliability, availability, and performance of your resources.
", "GetDistributions": "Returns information about one or more of your Amazon Lightsail content delivery network (CDN) distributions.
", "GetDomain": "Returns information about a specific domain recordset.
", "GetDomains": "Returns a list of all domains in the user's account.
", - "GetExportSnapshotRecords": "Returns the export snapshot record created as a result of the export snapshot
operation.
An export snapshot record can be used to create a new Amazon EC2 instance and its related resources with the create cloud formation stack
operation.
Returns all export snapshot records created as a result of the export snapshot
operation.
An export snapshot record can be used to create a new Amazon EC2 instance and its related resources with the CreateCloudFormationStack action.
", "GetInstance": "Returns information about a specific Amazon Lightsail instance, which is a virtual private server.
", - "GetInstanceAccessDetails": "Returns temporary SSH keys you can use to connect to a specific virtual private server, or instance.
The get instance access details
operation supports tag-based access control via resource tags applied to the resource identified by instance name
. For more information, see the Lightsail Dev Guide.
Returns temporary SSH keys you can use to connect to a specific virtual private server, or instance.
The get instance access details
operation supports tag-based access control via resource tags applied to the resource identified by instance name
. For more information, see the Amazon Lightsail Developer Guide.
Returns the data points for the specified Amazon Lightsail instance metric, given an instance name.
Metrics report the utilization of your resources, and the error counts generated by them. Monitor and collect metric data regularly to maintain the reliability, availability, and performance of your resources.
", "GetInstancePortStates": "Returns the firewall port states for a specific Amazon Lightsail instance, the IP addresses allowed to connect to the instance through the ports, and the protocol.
", "GetInstanceSnapshot": "Returns information about a specific instance snapshot.
", @@ -118,32 +126,35 @@ "GetStaticIps": "Returns information about all static IPs in the user's account.
", "ImportKeyPair": "Imports a public SSH key from a specific key pair.
", "IsVpcPeered": "Returns a Boolean value indicating whether your Lightsail VPC is peered.
", - "OpenInstancePublicPorts": "Opens ports for a specific Amazon Lightsail instance, and specifies the IP addresses allowed to connect to the instance through the ports, and the protocol.
The OpenInstancePublicPorts
action supports tag-based access control via resource tags applied to the resource identified by instanceName
. For more information, see the Lightsail Dev Guide.
Tries to peer the Lightsail VPC with the user's default VPC.
", + "OpenInstancePublicPorts": "Opens ports for a specific Amazon Lightsail instance, and specifies the IP addresses allowed to connect to the instance through the ports, and the protocol.
The OpenInstancePublicPorts
action supports tag-based access control via resource tags applied to the resource identified by instanceName
. For more information, see the Amazon Lightsail Developer Guide.
Peers the Lightsail VPC with the user's default VPC.
", "PutAlarm": "Creates or updates an alarm, and associates it with the specified metric.
An alarm is used to monitor a single metric for one of your resources. When a metric condition is met, the alarm can notify you by email, SMS text message, and a banner displayed on the Amazon Lightsail console. For more information, see Alarms in Amazon Lightsail.
When this action creates an alarm, the alarm state is immediately set to INSUFFICIENT_DATA
. The alarm is then evaluated and its state is set appropriately. Any actions associated with the new state are then executed.
When you update an existing alarm, its state is left unchanged, but the update completely overwrites the previous configuration of the alarm. The alarm is then evaluated with the updated configuration.
", - "PutInstancePublicPorts": "Opens ports for a specific Amazon Lightsail instance, and specifies the IP addresses allowed to connect to the instance through the ports, and the protocol. This action also closes all currently open ports that are not included in the request. Include all of the ports and the protocols you want to open in your PutInstancePublicPorts
request. Or use the OpenInstancePublicPorts
action to open ports without closing currently open ports.
The PutInstancePublicPorts
action supports tag-based access control via resource tags applied to the resource identified by instanceName
. For more information, see the Lightsail Dev Guide.
Restarts a specific instance.
The reboot instance
operation supports tag-based access control via resource tags applied to the resource identified by instance name
. For more information, see the Lightsail Dev Guide.
Restarts a specific database in Amazon Lightsail.
The reboot relational database
operation supports tag-based access control via resource tags applied to the resource identified by relationalDatabaseName. For more information, see the Lightsail Dev Guide.
Registers a container image to your Amazon Lightsail container service.
This action is not required if you install and use the Lightsail Control (lightsailctl) plugin to push container images to your Lightsail container service. For more information, see Pushing and managing container images on your Amazon Lightsail container services in the Lightsail Dev Guide.
Opens ports for a specific Amazon Lightsail instance, and specifies the IP addresses allowed to connect to the instance through the ports, and the protocol. This action also closes all currently open ports that are not included in the request. Include all of the ports and the protocols you want to open in your PutInstancePublicPorts
request. Or use the OpenInstancePublicPorts
action to open ports without closing currently open ports.
The PutInstancePublicPorts
action supports tag-based access control via resource tags applied to the resource identified by instanceName
. For more information, see the Amazon Lightsail Developer Guide.
Restarts a specific instance.
The reboot instance
operation supports tag-based access control via resource tags applied to the resource identified by instance name
. For more information, see the Amazon Lightsail Developer Guide.
Restarts a specific database in Amazon Lightsail.
The reboot relational database
operation supports tag-based access control via resource tags applied to the resource identified by relationalDatabaseName. For more information, see the Amazon Lightsail Developer Guide.
Registers a container image to your Amazon Lightsail container service.
This action is not required if you install and use the Lightsail Control (lightsailctl) plugin to push container images to your Lightsail container service. For more information, see Pushing and managing container images on your Amazon Lightsail container services in the Amazon Lightsail Developer Guide.
Deletes a specific static IP from your account.
", "ResetDistributionCache": "Deletes currently cached content from your Amazon Lightsail content delivery network (CDN) distribution.
After resetting the cache, the next time a content request is made, your distribution pulls, serves, and caches it from the origin.
", "SendContactMethodVerification": "Sends a verification request to an email contact method to ensure it's owned by the requester. SMS contact methods don't need to be verified.
A contact method is used to send you notifications about your Amazon Lightsail resources. You can add one email address and one mobile phone number contact method in each AWS Region. However, SMS text messaging is not supported in some AWS Regions, and SMS text messages cannot be sent to some countries/regions. For more information, see Notifications in Amazon Lightsail.
A verification request is sent to the contact method when you initially create it. Use this action to send another verification request if a previous verification request was deleted, or has expired.
Notifications are not sent to an email contact method until after it is verified, and confirmed as valid.
Sets the IP address type for an Amazon Lightsail resource.
Use this action to enable dual-stack for a resource, which enables IPv4 and IPv6 for the specified resource. Alternately, you can use this action to disable dual-stack, and enable IPv4 only.
", - "StartInstance": "Starts a specific Amazon Lightsail instance from a stopped state. To restart an instance, use the reboot instance
operation.
When you start a stopped instance, Lightsail assigns a new public IP address to the instance. To use the same IP address after stopping and starting an instance, create a static IP address and attach it to the instance. For more information, see the Lightsail Dev Guide.
The start instance
operation supports tag-based access control via resource tags applied to the resource identified by instance name
. For more information, see the Lightsail Dev Guide.
Starts a specific database from a stopped state in Amazon Lightsail. To restart a database, use the reboot relational database
operation.
The start relational database
operation supports tag-based access control via resource tags applied to the resource identified by relationalDatabaseName. For more information, see the Lightsail Dev Guide.
Stops a specific Amazon Lightsail instance that is currently running.
When you start a stopped instance, Lightsail assigns a new public IP address to the instance. To use the same IP address after stopping and starting an instance, create a static IP address and attach it to the instance. For more information, see the Lightsail Dev Guide.
The stop instance
operation supports tag-based access control via resource tags applied to the resource identified by instance name
. For more information, see the Lightsail Dev Guide.
Stops a specific database that is currently running in Amazon Lightsail.
The stop relational database
operation supports tag-based access control via resource tags applied to the resource identified by relationalDatabaseName. For more information, see the Lightsail Dev Guide.
Adds one or more tags to the specified Amazon Lightsail resource. Each resource can have a maximum of 50 tags. Each tag consists of a key and an optional value. Tag keys must be unique per resource. For more information about tags, see the Lightsail Dev Guide.
The tag resource
operation supports tag-based access control via request tags and resource tags applied to the resource identified by resource name
. For more information, see the Lightsail Dev Guide.
Sets the Amazon Lightsail resources that can access the specified Lightsail bucket.
Lightsail buckets currently support setting access for Lightsail instances in the same AWS Region.
", + "StartInstance": "Starts a specific Amazon Lightsail instance from a stopped state. To restart an instance, use the reboot instance
operation.
When you start a stopped instance, Lightsail assigns a new public IP address to the instance. To use the same IP address after stopping and starting an instance, create a static IP address and attach it to the instance. For more information, see the Amazon Lightsail Developer Guide.
The start instance
operation supports tag-based access control via resource tags applied to the resource identified by instance name
. For more information, see the Amazon Lightsail Developer Guide.
Starts a specific database from a stopped state in Amazon Lightsail. To restart a database, use the reboot relational database
operation.
The start relational database
operation supports tag-based access control via resource tags applied to the resource identified by relationalDatabaseName. For more information, see the Amazon Lightsail Developer Guide.
Stops a specific Amazon Lightsail instance that is currently running.
When you start a stopped instance, Lightsail assigns a new public IP address to the instance. To use the same IP address after stopping and starting an instance, create a static IP address and attach it to the instance. For more information, see the Amazon Lightsail Developer Guide.
The stop instance
operation supports tag-based access control via resource tags applied to the resource identified by instance name
. For more information, see the Amazon Lightsail Developer Guide.
Stops a specific database that is currently running in Amazon Lightsail.
The stop relational database
operation supports tag-based access control via resource tags applied to the resource identified by relationalDatabaseName. For more information, see the Amazon Lightsail Developer Guide.
Adds one or more tags to the specified Amazon Lightsail resource. Each resource can have a maximum of 50 tags. Each tag consists of a key and an optional value. Tag keys must be unique per resource. For more information about tags, see the Amazon Lightsail Developer Guide.
The tag resource
operation supports tag-based access control via request tags and resource tags applied to the resource identified by resource name
. For more information, see the Amazon Lightsail Developer Guide.
Tests an alarm by displaying a banner on the Amazon Lightsail console. If a notification trigger is configured for the specified alarm, the test also sends a notification to the notification protocol (Email
and/or SMS
) configured for the alarm.
An alarm is used to monitor a single metric for one of your resources. When a metric condition is met, the alarm can notify you by email, SMS text message, and a banner displayed on the Amazon Lightsail console. For more information, see Alarms in Amazon Lightsail.
", - "UnpeerVpc": "Attempts to unpeer the Lightsail VPC from the user's default VPC.
", - "UntagResource": "Deletes the specified set of tag keys and their values from the specified Amazon Lightsail resource.
The untag resource
operation supports tag-based access control via request tags and resource tags applied to the resource identified by resource name
. For more information, see the Lightsail Dev Guide.
Unpeers the Lightsail VPC from the user's default VPC.
", + "UntagResource": "Deletes the specified set of tag keys and their values from the specified Amazon Lightsail resource.
The untag resource
operation supports tag-based access control via request tags and resource tags applied to the resource identified by resource name
. For more information, see the Amazon Lightsail Developer Guide.
Updates an existing Amazon Lightsail bucket.
Use this action to update the configuration of an existing bucket, such as versioning, public accessibility, and the AWS accounts that can access the bucket.
", + "UpdateBucketBundle": "Updates the bundle, or storage plan, of an existing Amazon Lightsail bucket.
A bucket bundle specifies the monthly cost, storage space, and data transfer quota for a bucket. You can update a bucket's bundle only one time within a monthly AWS billing cycle. To determine if you can update a bucket's bundle, use the GetBuckets action. The ableToUpdateBundle
parameter in the response will indicate whether you can currently update a bucket's bundle.
Update a bucket's bundle if it's consistently going over its storage space or data transfer quota, or if a bucket's usage is consistently in the lower range of its storage space or data transfer quota. Due to the unpredictable usage fluctuations that a bucket might experience, we strongly recommend that you update a bucket's bundle only as a long-term strategy, instead of as a short-term, monthly cost-cutting measure. Choose a bucket bundle that will provide the bucket with ample storage space and data transfer for a long time to come.
", "UpdateContainerService": "Updates the configuration of your Amazon Lightsail container service, such as its power, scale, and public domain names.
", - "UpdateDistribution": "Updates an existing Amazon Lightsail content delivery network (CDN) distribution.
Use this action to update the configuration of your existing distribution
", + "UpdateDistribution": "Updates an existing Amazon Lightsail content delivery network (CDN) distribution.
Use this action to update the configuration of your existing distribution.
", "UpdateDistributionBundle": "Updates the bundle of your Amazon Lightsail content delivery network (CDN) distribution.
A distribution bundle specifies the monthly network transfer quota and monthly cost of your distribution.
Update your distribution's bundle if your distribution is going over its monthly network transfer quota and is incurring an overage fee.
You can update your distribution's bundle only one time within your monthly AWS billing cycle. To determine if you can update your distribution's bundle, use the GetDistributions
action. The ableToUpdateBundle
parameter in the result will indicate whether you can currently update your distribution's bundle.
Updates a domain recordset after it is created.
The update domain entry
operation supports tag-based access control via resource tags applied to the resource identified by domain name
. For more information, see the Lightsail Dev Guide.
Updates the specified attribute for a load balancer. You can only update one attribute at a time.
The update load balancer attribute
operation supports tag-based access control via resource tags applied to the resource identified by load balancer name
. For more information, see the Lightsail Dev Guide.
Allows the update of one or more attributes of a database in Amazon Lightsail.
Updates are applied immediately, or in cases where the updates could result in an outage, are applied during the database's predefined maintenance window.
The update relational database
operation supports tag-based access control via resource tags applied to the resource identified by relationalDatabaseName. For more information, see the Lightsail Dev Guide.
Allows the update of one or more parameters of a database in Amazon Lightsail.
Parameter updates don't cause outages; therefore, their application is not subject to the preferred maintenance window. However, there are two ways in which parameter updates are applied: dynamic
or pending-reboot
. Parameters marked with a dynamic
apply type are applied immediately. Parameters marked with a pending-reboot
apply type are applied only after the database is rebooted using the reboot relational database
operation.
The update relational database parameters
operation supports tag-based access control via resource tags applied to the resource identified by relationalDatabaseName. For more information, see the Lightsail Dev Guide.
Updates a domain recordset after it is created.
The update domain entry
operation supports tag-based access control via resource tags applied to the resource identified by domain name
. For more information, see the Amazon Lightsail Developer Guide.
Updates the specified attribute for a load balancer. You can only update one attribute at a time.
The update load balancer attribute
operation supports tag-based access control via resource tags applied to the resource identified by load balancer name
. For more information, see the Amazon Lightsail Developer Guide.
Allows the update of one or more attributes of a database in Amazon Lightsail.
Updates are applied immediately, or in cases where the updates could result in an outage, are applied during the database's predefined maintenance window.
The update relational database
operation supports tag-based access control via resource tags applied to the resource identified by relationalDatabaseName. For more information, see the Amazon Lightsail Developer Guide.
Allows the update of one or more parameters of a database in Amazon Lightsail.
Parameter updates don't cause outages; therefore, their application is not subject to the preferred maintenance window. However, there are two ways in which parameter updates are applied: dynamic
or pending-reboot
. Parameters marked with a dynamic
apply type are applied immediately. Parameters marked with a pending-reboot
apply type are applied only after the database is rebooted using the reboot relational database
operation.
The update relational database parameters
operation supports tag-based access control via resource tags applied to the resource identified by relationalDatabaseName. For more information, see the Amazon Lightsail Developer Guide.
The access direction (inbound
or outbound
).
Lightsail currently supports only inbound
access direction.
Describes an access key for an Amazon Lightsail bucket.
Access keys grant full programmatic access to the specified bucket and its objects. You can have a maximum of two access keys per bucket. Use the CreateBucketAccessKey action to create an access key for a specific bucket. For more information about access keys, see Creating access keys for a bucket in Amazon Lightsail in the Amazon Lightsail Developer Guide.
The secretAccessKey
value is returned only in response to the CreateBucketAccessKey
action. You can get a secret access key only when you first create an access key; you cannot get the secret access key later. If you lose the secret access key, you must create a new access key.
An object that describes the access key that is created.
" + } + }, + "AccessKeyList": { + "base": null, + "refs": { + "GetBucketAccessKeysResult$accessKeys": "An object that describes the access keys for the specified bucket.
" + } + }, + "AccessReceiverList": { + "base": null, + "refs": { + "Bucket$resourcesReceivingAccess": "An array of objects that describe Lightsail instances that have access to the bucket.
Use the SetResourceAccessForBucket action to update the instances that have access to a bucket.
" + } + }, + "AccessRules": { + "base": "Describes the anonymous access permissions for an Amazon Lightsail bucket and its objects.
For more information about bucket access permissions, see Understanding bucket permissions in Amazon Lightsail in the
Amazon Lightsail Developer Guide.
", + "refs": { + "Bucket$accessRules": "An object that describes the access rules of the bucket.
", + "UpdateBucketRequest$accessRules": "An object that sets the public accessibility of objects in the specified bucket.
" + } + }, + "AccessType": { + "base": null, + "refs": { + "AccessRules$getObject": "Specifies the anonymous access to all objects in a bucket.
The following options can be specified:
public
- Sets all objects in the bucket to public (read-only), making them readable by anyone in the world.
If the getObject
value is set to public
, then all objects in the bucket default to public regardless of the allowPublicOverrides
value.
private
- Sets all objects in the bucket to private, making them readable only by you or anyone you give access to.
If the getObject
value is set to private
, and the allowPublicOverrides
value is set to true
, then all objects in the bucket default to private unless they are configured with a public-read
ACL. Individual objects with a public-read
ACL are readable by anyone in the world.
Lightsail throws this exception when an account is still in the setup in progress state.
", "refs": { @@ -199,7 +242,7 @@ } }, "Alarm": { - "base": "Describes an alarm.
An alarm is a way to monitor your Amazon Lightsail resource metrics. For more information, see Alarms in Amazon Lightsail.
", + "base": "Describes an alarm.
An alarm is a way to monitor your Lightsail resource metrics. For more information, see Alarms in Amazon Lightsail.
", "refs": { "AlarmsList$member": null } @@ -375,6 +418,61 @@ "Blueprint$type": "The type of the blueprint (e.g., os
or app
).
Describes an Amazon Lightsail bucket.
", + "refs": { + "BucketList$member": null, + "CreateBucketResult$bucket": "An object that describes the bucket that is created.
", + "UpdateBucketResult$bucket": "An object that describes the bucket that is updated.
" + } + }, + "BucketBundle": { + "base": "Describes the specifications of a bundle that can be applied to an Amazon Lightsail bucket.
A bucket bundle specifies the monthly cost, storage space, and data transfer quota for a bucket.
", + "refs": { + "BucketBundleList$member": null + } + }, + "BucketBundleList": { + "base": null, + "refs": { + "GetBucketBundlesResult$bundles": "An object that describes bucket bundles.
" + } + }, + "BucketList": { + "base": null, + "refs": { + "GetBucketsResult$buckets": "An array of objects that describe buckets.
" + } + }, + "BucketMetricName": { + "base": null, + "refs": { + "GetBucketMetricDataRequest$metricName": "The metric for which you want to return information.
Valid bucket metric names are listed below, along with the most useful statistics to include in your request, and the published unit value.
These bucket metrics are reported once per day.
BucketSizeBytes
- The amount of data in bytes stored in a bucket. This value is calculated by summing the size of all objects in the bucket (including object versions), including the size of all parts for all incomplete multipart uploads to the bucket.
Statistics: The most useful statistic is Maximum
.
Unit: The published unit is Bytes
.
NumberOfObjects
- The total number of objects stored in a bucket. This value is calculated by counting all objects in the bucket (including object versions) and the total number of parts for all incomplete multipart uploads to the bucket.
Statistics: The most useful statistic is Average
.
Unit: The published unit is Count
.
The name of the metric returned.
" + } + }, + "BucketName": { + "base": null, + "refs": { + "Bucket$name": "The name of the bucket.
", + "CreateBucketAccessKeyRequest$bucketName": "The name of the bucket that the new access key will belong to, and grant access to.
", + "CreateBucketRequest$bucketName": "The name for the bucket.
For more information about bucket names, see Bucket naming rules in Amazon Lightsail in the Amazon Lightsail Developer Guide.
", + "DeleteBucketAccessKeyRequest$bucketName": "The name of the bucket that the access key belongs to.
", + "DeleteBucketRequest$bucketName": "The name of the bucket to delete.
Use the GetBuckets action to get a list of bucket names that you can specify.
", + "GetBucketAccessKeysRequest$bucketName": "The name of the bucket for which to return access keys.
", + "GetBucketMetricDataRequest$bucketName": "The name of the bucket for which to get metric data.
", + "GetBucketsRequest$bucketName": "The name of the bucket for which to return information.
When omitted, the response includes all of your buckets in the AWS Region where the request is made.
", + "SetResourceAccessForBucketRequest$bucketName": "The name of the bucket for which to set access to another Lightsail resource.
", + "UpdateBucketBundleRequest$bucketName": "The name of the bucket for which to update the bundle.
", + "UpdateBucketRequest$bucketName": "The name of the bucket to update.
" + } + }, + "BucketState": { + "base": "Describes the state of an Amazon Lightsail bucket.
", + "refs": { + "Bucket$state": "An object that describes the state of the bucket.
" + } + }, "Bundle": { "base": "Describes a bundle, which is a set of specs describing your virtual private server (or instance).
", "refs": { @@ -470,7 +568,7 @@ } }, "CloudFormationStackRecord": { - "base": "Describes a CloudFormation stack record created as a result of the create cloud formation stack
operation.
A CloudFormation stack record provides information about the AWS CloudFormation stack used to create a new Amazon Elastic Compute Cloud instance from an exported Lightsail instance snapshot.
", + "base": "Describes a CloudFormation stack record created as a result of the create cloud formation stack
action.
A CloudFormation stack record provides information about the AWS CloudFormation stack used to create a new Amazon Elastic Compute Cloud instance from an exported Lightsail instance snapshot.
", "refs": { "CloudFormationStackRecordList$member": null } @@ -781,6 +879,26 @@ "refs": { } }, + "CreateBucketAccessKeyRequest": { + "base": null, + "refs": { + } + }, + "CreateBucketAccessKeyResult": { + "base": null, + "refs": { + } + }, + "CreateBucketRequest": { + "base": null, + "refs": { + } + }, + "CreateBucketResult": { + "base": null, + "refs": { + } + }, "CreateCertificateRequest": { "base": null, "refs": { @@ -1011,6 +1129,26 @@ "refs": { } }, + "DeleteBucketAccessKeyRequest": { + "base": null, + "refs": { + } + }, + "DeleteBucketAccessKeyResult": { + "base": null, + "refs": { + } + }, + "DeleteBucketRequest": { + "base": null, + "refs": { + } + }, + "DeleteBucketResult": { + "base": null, + "refs": { + } + }, "DeleteCertificateRequest": { "base": null, "refs": { @@ -1239,7 +1377,7 @@ } }, "Disk": { - "base": "Describes a system disk or a block storage disk.
", + "base": "Describes a block storage disk.
", "refs": { "DiskList$member": null, "GetDiskResult$disk": "An object containing information about the disk.
" @@ -1334,7 +1472,7 @@ } }, "Domain": { - "base": "Describes a domain where you are storing recordsets in Lightsail.
", + "base": "Describes a domain where you are storing recordsets.
", "refs": { "DomainList$member": null, "GetDomainResult$domain": "An array of key-value pairs containing information about your get domain request.
" @@ -1542,6 +1680,46 @@ "refs": { } }, + "GetBucketAccessKeysRequest": { + "base": null, + "refs": { + } + }, + "GetBucketAccessKeysResult": { + "base": null, + "refs": { + } + }, + "GetBucketBundlesRequest": { + "base": null, + "refs": { + } + }, + "GetBucketBundlesResult": { + "base": null, + "refs": { + } + }, + "GetBucketMetricDataRequest": { + "base": null, + "refs": { + } + }, + "GetBucketMetricDataResult": { + "base": null, + "refs": { + } + }, + "GetBucketsRequest": { + "base": null, + "refs": { + } + }, + "GetBucketsResult": { + "base": null, + "refs": { + } + }, "GetBundlesRequest": { "base": null, "refs": { @@ -2107,6 +2285,12 @@ "InstanceAccessDetails$hostKeys": "Describes the public SSH host keys or the RDP certificate.
" } }, + "IAMAccessKeyId": { + "base": null, + "refs": { + "AccessKey$accessKeyId": "The ID of the access key.
" + } + }, "ImportKeyPairRequest": { "base": null, "refs": { @@ -2338,8 +2522,10 @@ "IsoDate": { "base": null, "refs": { + "AccessKey$createdAt": "The timestamp when the access key was created.
", "Alarm$createdAt": "The timestamp when the alarm was created.
", "AutoSnapshotDetails$createdAt": "The timestamp when the automatic snapshot was created.
", + "Bucket$createdAt": "The timestamp when the distribution was created.
", "Certificate$createdAt": "The timestamp when the certificate was created.
", "Certificate$issuedAt": "The timestamp when the certificate was issued.
", "Certificate$notBefore": "The timestamp when the certificate is first valid.
", @@ -2358,6 +2544,8 @@ "Domain$createdAt": "The date when the domain recordset was created.
", "ExportSnapshotRecord$createdAt": "The date when the export snapshot record was created.
", "ExportSnapshotRecordSourceInfo$createdAt": "The date when the source instance or disk snapshot was created.
", + "GetBucketMetricDataRequest$startTime": "The timestamp indicating the earliest data to be returned.
", + "GetBucketMetricDataRequest$endTime": "The timestamp indicating the latest data to be returned.
", "GetContainerLogRequest$startTime": "The start of the time interval for which to get log data.
Constraints:
Specified in Coordinated Universal Time (UTC).
Specified in the Unix time format.
For example, if you wish to use a start time of October 1, 2018, at 8 PM UTC, specify 1538424000
as the start time.
You can convert a human-friendly time to Unix time format using a converter like Epoch converter.
", "GetContainerLogRequest$endTime": "The end of the time interval for which to get log data.
Constraints:
Specified in Coordinated Universal Time (UTC).
Specified in the Unix time format.
For example, if you wish to use an end time of October 1, 2018, at 9 PM UTC, specify 1538427600
as the end time.
You can convert a human-friendly time to Unix time format using a converter like Epoch converter.
", "GetContainerServiceMetricDataRequest$startTime": "The start time of the time period.
", @@ -2408,7 +2596,7 @@ } }, "KeyPair": { - "base": "Describes the SSH key pair.
", + "base": "Describes an SSH key pair.
", "refs": { "CreateKeyPairResult$keyPair": "An array of key-value pairs containing information about the new key pair you just created.
", "GetKeyPairResult$keyPair": "An array of key-value pairs containing information about the key pair.
", @@ -2429,7 +2617,7 @@ } }, "LoadBalancer": { - "base": "Describes the Lightsail load balancer.
", + "base": "Describes a load balancer.
", "refs": { "GetLoadBalancerResult$loadBalancer": "An object containing information about your load balancer.
", "LoadBalancerList$member": null @@ -2579,6 +2767,7 @@ "MetricDatapointList": { "base": null, "refs": { + "GetBucketMetricDataResult$metricData": "An array of objects that describe the metric data returned.
", "GetContainerServiceMetricDataResult$metricData": "An array of objects that describe the metric data returned.
", "GetDistributionMetricDataResult$metricData": "An array of objects that describe the metric data returned.
", "GetInstanceMetricDataResult$metricData": "An array of objects that describe the metric data returned.
", @@ -2597,6 +2786,7 @@ "base": null, "refs": { "Alarm$period": "The period, in seconds, over which the statistic is applied.
", + "GetBucketMetricDataRequest$period": "The granularity, in seconds, of the returned data points.
Bucket storage metrics are reported once per day. Therefore, you should specify a period of 86400 seconds, which is the number of seconds in a day.
The granularity, in seconds, of the returned data points.
All container service metric data is available in 5-minute (300 seconds) granularity.
", "GetDistributionMetricDataRequest$period": "The granularity, in seconds, for the metric data points that will be returned.
", "GetInstanceMetricDataRequest$period": "The granularity, in seconds, of the returned data points.
The StatusCheckFailed
, StatusCheckFailed_Instance
, and StatusCheckFailed_System
instance metric data is available in 1-minute (60 seconds) granularity. All other instance metric data is available in 5-minute (300 seconds) granularity.
The statistic for the metric.
The following statistics are available:
Minimum
- The lowest value observed during the specified period. Use this value to determine low volumes of activity for your application.
Maximum
- The highest value observed during the specified period. Use this value to determine high volumes of activity for your application.
Sum
- The sum of all values submitted for the matching metric. You can use this statistic to determine the total volume of a metric.
Average
- The value of Sum
/ SampleCount
during the specified period. By comparing this statistic with the Minimum
and Maximum
values, you can determine the full scope of a metric and how close the average use is to the Minimum
and Maximum
values. This comparison helps you to know when to increase or decrease your resources.
SampleCount
- The count, or number, of data points used for the statistical calculation.
The statistic for the metric.
The following statistics are available:
Minimum
- The lowest value observed during the specified period. Use this value to determine low volumes of activity for your application.
Maximum
- The highest value observed during the specified period. Use this value to determine high volumes of activity for your application.
Sum
- All values submitted for the matching metric added together. You can use this statistic to determine the total volume of a metric.
Average
- The value of Sum
/ SampleCount
during the specified period. By comparing this statistic with the Minimum
and Maximum
values, you can determine the full scope of a metric and how close the average use is to the Minimum
and Maximum
values. This comparison helps you to know when to increase or decrease your resources.
SampleCount
- The count, or number, of data points used for the statistical calculation.
The statistic for the metric.
The following statistics are available:
Minimum
- The lowest value observed during the specified period. Use this value to determine low volumes of activity for your application.
Maximum
- The highest value observed during the specified period. Use this value to determine high volumes of activity for your application.
Sum
- All values submitted for the matching metric added together. You can use this statistic to determine the total volume of a metric.
Average
- The value of Sum / SampleCount during the specified period. By comparing this statistic with the Minimum and Maximum values, you can determine the full scope of a metric and how close the average use is to the Minimum and Maximum values. This comparison helps you to know when to increase or decrease your resources.
SampleCount
- The count, or number, of data points used for the statistical calculation.
The statistic for the metric.
The following statistics are available:
Minimum
- The lowest value observed during the specified period. Use this value to determine low volumes of activity for your application.
Maximum
- The highest value observed during the specified period. Use this value to determine high volumes of activity for your application.
Sum
- All values submitted for the matching metric added together. You can use this statistic to determine the total volume of a metric.
Average
- The value of Sum / SampleCount during the specified period. By comparing this statistic with the Minimum and Maximum values, you can determine the full scope of a metric and how close the average use is to the Minimum and Maximum values. This comparison helps you to know when to increase or decrease your resources.
SampleCount
- The count, or number, of data points used for the statistical calculation.
The unit of the metric associated with the alarm.
", + "GetBucketMetricDataRequest$unit": "The unit for the metric data request.
Valid units depend on the metric data being requested. For the valid units with each available metric, see the metricName
parameter.
The unit for the metric data request.
Valid units depend on the metric data being requested. For the valid units with each available metric, see the metricName
parameter.
The unit for the metric data request. Valid units depend on the metric data being requested. For the valid units to specify with each available metric, see the metricName
parameter.
The unit for the metric data request. Valid units depend on the metric data being requested. For the valid units with each available metric, see the metricName
parameter.
The secret access key used to sign requests.
You should store the secret access key in a safe location. We recommend that you delete the access key if the secret access key is compromised.
", "Alarm$arn": "The Amazon Resource Name (ARN) of the alarm.
", "AttachDiskRequest$diskPath": "The disk path to expose to the instance (e.g., /dev/xvdf
).
The name of the Availability Zone. The format is us-east-2a
(case-sensitive).
The state of the Availability Zone.
", "Blueprint$blueprintId": "The ID for the virtual private server image (e.g., app_wordpress_4_4
or app_lamp_7_0
).
The group name of the blueprint (e.g., amazon-linux
).
The Lightsail resource type of the bucket (for example, Bucket
).
The Amazon Resource Name (ARN) of the bucket.
", + "Bucket$bundleId": "The ID of the bundle currently applied to the bucket.
A bucket bundle specifies the monthly cost, storage space, and data transfer quota for a bucket.
Use the UpdateBucketBundle action to change the bundle of a bucket.
", + "Bucket$url": "The URL of the bucket.
", + "Bucket$supportCode": "The support code for a bucket. Include this code in your email to support when you have questions about a Lightsail bucket. This code enables our support team to look up your Lightsail information more easily.
", + "Bucket$objectVersioning": "Indicates whether object versioning is enabled for the bucket.
The following options can be configured:
Enabled
- Object versioning is enabled.
Suspended
- Object versioning was previously enabled but is currently suspended. Existing object versions are retained.
NeverEnabled
- Object versioning has never been enabled.
The ID of the bundle.
", + "BucketBundle$name": "The name of the bundle.
", + "BucketState$code": "The state code of the bucket.
The following codes are possible:
OK
- The bucket is in a running state.
Unknown
- Creation of the bucket might have timed-out. You might want to delete the bucket and create a new one.
The bundle ID (e.g., micro_1_0
).
The HTTP methods that are processed and forwarded to the distribution's origin.
You can specify the following options:
GET,HEAD
- The distribution forwards the GET
and HEAD
methods.
GET,HEAD,OPTIONS
- The distribution forwards the GET
, HEAD
, and OPTIONS
methods.
GET,HEAD,OPTIONS,PUT,PATCH,POST,DELETE
- The distribution forwards the GET
, HEAD
, OPTIONS
, PUT
, PATCH
, POST
, and DELETE
methods.
If you specify the third option, you might need to restrict access to your distribution's origin so users can't perform operations that you don't want them to. For example, you might not want users to have permission to delete objects from your origin.
", "CacheSettings$cachedHTTPMethods": "The HTTP method responses that are cached by your distribution.
You can specify the following options:
GET,HEAD
- The distribution caches responses to the GET
and HEAD
methods.
GET,HEAD,OPTIONS
- The distribution caches responses to the GET
, HEAD
, and OPTIONS
methods.
The destination of the contact method, such as an email address or a mobile phone number.
", "ContactMethod$arn": "The Amazon Resource Name (ARN) of the contact method.
", "ContainerService$arn": "The Amazon Resource Name (ARN) of the container service.
", + "CreateBucketRequest$bundleId": "The ID of the bundle to use for the bucket.
A bucket bundle specifies the monthly cost, storage space, and data transfer quota for a bucket.
Use the GetBucketBundles action to get a list of bundle IDs that you can specify.
Use the UpdateBucketBundle action to change the bundle after the bucket is created.
", "CreateDiskFromSnapshotRequest$availabilityZone": "The Availability Zone where you want to create the disk (e.g., us-east-2a
). Choose the same Availability Zone as the Lightsail instance where you want to create the disk.
Use the GetRegions operation to list the Availability Zones where Lightsail is currently available.
", "CreateDiskRequest$availabilityZone": "The Availability Zone where you want to create the disk (e.g., us-east-2a
). Use the same Availability Zone as the Lightsail instance to which you want to attach the disk.
Use the get regions
operation to list the Availability Zones where Lightsail is currently available.
The bundle of specification information for your virtual private server (or instance), including the pricing plan (e.g., micro_1_0
).
The ID for a virtual private server image (e.g., app_wordpress_4_4
or app_lamp_7_0
). Use the get blueprints
operation to return a list of available images (or blueprints).
Use active blueprints when creating new instances. Inactive blueprints are listed to support customers with existing instances and are not necessarily available to create new instances. Blueprints are marked inactive when they become outdated due to operating system updates or new application releases.
The bundle of specification information for your virtual private server (or instance), including the pricing plan (e.g., micro_1_0
).
The ID of the access key to delete.
Use the GetBucketAccessKeys action to get a list of access key IDs that you can specify.
", "DestinationInfo$id": "The ID of the resource created at the destination.
", "DestinationInfo$service": "The destination service of the record.
", "Disk$arn": "The Amazon Resource Name (ARN) of the disk.
", @@ -2718,6 +2922,7 @@ "LoadBalancerTlsCertificateDomainValidationRecord$type": "The type of validation record. For example, CNAME
for domain validation.
The value for that type.
", "Operation$id": "The ID of the operation.
", + "PartnerIdList$member": null, "PendingMaintenanceAction$action": "The type of pending database maintenance action.
", "PendingMaintenanceAction$description": "Additional detail about the pending database maintenance action.
", "RelationalDatabase$arn": "The Amazon Resource Name (ARN) of the database.
", @@ -2737,7 +2942,11 @@ "RelationalDatabaseSnapshot$state": "The state of the database snapshot.
", "RelationalDatabaseSnapshot$fromRelationalDatabaseName": "The name of the source database from which the database snapshot was created.
", "RelationalDatabaseSnapshot$fromRelationalDatabaseArn": "The Amazon Resource Name (ARN) of the database from which the database snapshot was created.
", - "StaticIp$arn": "The Amazon Resource Name (ARN) of the static IP (e.g., arn:aws:lightsail:us-east-2:123456789101:StaticIp/9cbb4a9e-f8e3-4dfe-b57e-12345EXAMPLE
).
The name of the Lightsail instance.
", + "ResourceReceivingAccess$resourceType": "The Lightsail resource type (for example, Instance
).
The Amazon Resource Name (ARN) of the static IP (e.g., arn:aws:lightsail:us-east-2:123456789101:StaticIp/9cbb4a9e-f8e3-4dfe-b57e-12345EXAMPLE
).
The ID of the new bundle to apply to the bucket.
Use the GetBucketBundles action to get a list of bundle IDs that you can specify.
", + "UpdateBucketRequest$versioning": "Specifies whether to enable or suspend versioning of objects in the bucket.
The following options can be specified:
Enabled
- Enables versioning of objects in the specified bucket.
Suspended
- Suspends versioning of objects in the specified bucket. Existing object versions are retained.
An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.
These SSL/TLS certificates are only usable by Lightsail load balancers. You can't get the certificate and use it for another purpose.
", "AttachStaticIpResult$operations": "An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.
", "CopySnapshotResult$operations": "An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.
", + "CreateBucketAccessKeyResult$operations": "An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.
", + "CreateBucketResult$operations": "An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.
", "CreateCertificateResult$operations": "An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.
", "CreateCloudFormationStackResult$operations": "An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.
", "CreateContactMethodResult$operations": "An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.
", @@ -2818,6 +3029,8 @@ "CreateRelationalDatabaseSnapshotResult$operations": "An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.
", "DeleteAlarmResult$operations": "An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.
", "DeleteAutoSnapshotResult$operations": "An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.
", + "DeleteBucketAccessKeyResult$operations": "An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.
", + "DeleteBucketResult$operations": "An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.
", "DeleteCertificateResult$operations": "An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.
", "DeleteContactMethodResult$operations": "An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.
", "DeleteDiskResult$operations": "An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.
", @@ -2843,6 +3056,7 @@ "ReleaseStaticIpResult$operations": "An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.
", "SendContactMethodVerificationResult$operations": "An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.
", "SetIpAddressTypeResult$operations": "An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.
", + "SetResourceAccessForBucketResult$operations": "An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.
", "StartInstanceResult$operations": "An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.
", "StartRelationalDatabaseResult$operations": "An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.
", "StopInstanceResult$operations": "An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.
", @@ -2850,6 +3064,8 @@ "TagResourceResult$operations": "An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.
", "TestAlarmResult$operations": "An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.
", "UntagResourceResult$operations": "An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.
", + "UpdateBucketBundleResult$operations": "An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.
", + "UpdateBucketResult$operations": "An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.
", "UpdateDomainEntryResult$operations": "An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.
", "UpdateLoadBalancerAttributeResult$operations": "An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.
", "UpdateRelationalDatabaseParametersResult$operations": "An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.
", @@ -2881,6 +3097,13 @@ "Origin$protocolPolicy": "The protocol that your Amazon Lightsail distribution uses when establishing a connection with your origin to pull content.
" } }, + "PartnerIdList": { + "base": null, + "refs": { + "Bucket$readonlyAccessAccounts": "An array of strings that specify the AWS account IDs that have read-only access to the bucket.
", + "UpdateBucketRequest$readonlyAccessAccounts": "An array of strings to specify the AWS account IDs that can access the bucket.
You can give a maximum of 10 AWS accounts access to a bucket.
" + } + }, "PasswordData": { "base": "The password data for the Windows Server-based instance, including the ciphertext and the key pair name.
", "refs": { @@ -3215,10 +3438,17 @@ "UntagResourceRequest$resourceArn": "The Amazon Resource Name (ARN) of the resource from which you want to remove a tag.
" } }, + "ResourceBucketAccess": { + "base": null, + "refs": { + "SetResourceAccessForBucketRequest$access": "The access setting.
The following access settings are available:
allow
- Allows access to the bucket and its objects.
deny
- Denies access to the bucket and its objects. Use this setting to remove access for a resource previously set to allow
.
Describes the resource location.
", "refs": { "Alarm$location": "An object that lists information about the location of the alarm.
", + "Bucket$location": null, "CloudFormationStackRecord$location": "A list of objects describing the Availability Zone and AWS Region of the CloudFormation stack record.
", "ContactMethod$location": null, "ContainerService$location": "An object that describes the location of the container service, such as the AWS Region and Availability Zone.
", @@ -3321,7 +3551,7 @@ "GetDiskSnapshotRequest$diskSnapshotName": "The name of the disk snapshot (e.g., my-disk-snapshot
).
The name of the distribution for which to return the timestamp of the last cache reset.
Use the GetDistributions
action to get a list of distribution names that you can specify.
When omitted, the response includes the latest cache reset timestamp of all your distributions.
", "GetDistributionMetricDataRequest$distributionName": "The name of the distribution for which to get metric data.
Use the GetDistributions
action to get a list of distribution names that you can specify.
The name of the distribution for which to return information.
Use the GetDistributions
action to get a list of distribution names that you can specify.
When omitted, the response includes all of your distributions in the AWS Region where the request is made.
", + "GetDistributionsRequest$distributionName": "The name of the distribution for which to return information.
When omitted, the response includes all of your distributions in the AWS Region where the request is made.
", "GetInstanceAccessDetailsRequest$instanceName": "The name of the instance to access.
", "GetInstanceMetricDataRequest$instanceName": "The name of the instance for which you want to get metrics data.
", "GetInstancePortStatesRequest$instanceName": "The name of the instance for which to return firewall port states.
", @@ -3375,6 +3605,7 @@ "ResetDistributionCacheRequest$distributionName": "The name of the distribution for which to reset cache.
Use the GetDistributions
action to get a list of distribution names that you can specify.
The name of the resource for which to set the IP address type.
", + "SetResourceAccessForBucketRequest$resourceName": "The name of the Lightsail instance for which to set bucket access. The instance must be in a running or stopped state.
", "StartInstanceRequest$instanceName": "The name of the instance (a virtual private server) to start.
", "StartRelationalDatabaseRequest$relationalDatabaseName": "The name of your database to start.
", "StaticIp$name": "The name of the static IP (e.g., StaticIP-Ohio-EXAMPLE
).
An array of strings containing the names of the instances you want to detach from the load balancer.
" } }, + "ResourceReceivingAccess": { + "base": "Describes an Amazon Lightsail instance that has access to a Lightsail bucket.
", + "refs": { + "AccessReceiverList$member": null + } + }, "ResourceRecord": { "base": "Describes the domain name system (DNS) records to add to your domain's DNS to validate it for an Amazon Lightsail certificate.
", "refs": { @@ -3477,6 +3714,16 @@ "refs": { } }, + "SetResourceAccessForBucketRequest": { + "base": null, + "refs": { + } + }, + "SetResourceAccessForBucketResult": { + "base": null, + "refs": { + } + }, "StartInstanceRequest": { "base": null, "refs": { @@ -3498,7 +3745,7 @@ } }, "StaticIp": { - "base": "Describes the static IP.
", + "base": "Describes a static IP.
", "refs": { "GetStaticIpResult$staticIp": "An array of key-value pairs containing information about the requested static IP.
", "StaticIpList$member": null @@ -3510,6 +3757,12 @@ "GetStaticIpsResult$staticIps": "An array of key-value pairs containing information about your get static IPs request.
" } }, + "StatusType": { + "base": null, + "refs": { + "AccessKey$status": "The status of the access key.
A status of Active
means that the key is valid, while Inactive
means it is not.
Describes a tag key and optional value assigned to an Amazon Lightsail resource.
For more information about tags in Lightsail, see the Lightsail Dev Guide.
", + "base": "Describes a tag key and optional value assigned to an Amazon Lightsail resource.
For more information about tags in Lightsail, see the Amazon Lightsail Developer Guide.
", "refs": { "TagList$member": null } @@ -3590,11 +3843,13 @@ "TagList": { "base": null, "refs": { - "Certificate$tags": "The tag keys and optional values for the resource. For more information about tags in Lightsail, see the Lightsail Dev Guide.
", - "CertificateSummary$tags": "The tag keys and optional values for the resource. For more information about tags in Lightsail, see the Lightsail Dev Guide.
", - "ContainerService$tags": "The tag keys and optional values for the resource. For more information about tags in Lightsail, see the Lightsail Dev Guide.
", + "Bucket$tags": "The tag keys and optional values for the bucket. For more information, see Tags in Amazon Lightsail in the Amazon Lightsail Developer Guide.
", + "Certificate$tags": "The tag keys and optional values for the resource. For more information about tags in Lightsail, see the Amazon Lightsail Developer Guide.
", + "CertificateSummary$tags": "The tag keys and optional values for the resource. For more information about tags in Lightsail, see the Amazon Lightsail Developer Guide.
", + "ContainerService$tags": "The tag keys and optional values for the resource. For more information about tags in Lightsail, see the Amazon Lightsail Developer Guide.
", + "CreateBucketRequest$tags": "The tag keys and optional values to add to the bucket during creation.
Use the TagResource action to tag the bucket after it's created.
", "CreateCertificateRequest$tags": "The tag keys and optional values to add to the certificate during create.
Use the TagResource
action to tag a resource after it's created.
The tag keys and optional values for the container service.
For more information about tags in Lightsail, see the Lightsail Dev Guide.
", + "CreateContainerServiceRequest$tags": "The tag keys and optional values to add to the certificate during create.
Use the TagResource
action to tag a resource after it's created.
For more information about tags in Lightsail, see the Amazon Lightsail Developer Guide.
", "CreateDiskFromSnapshotRequest$tags": "The tag keys and optional values to add to the resource during create.
Use the TagResource
action to tag a resource after it's created.
The tag keys and optional values to add to the resource during create.
Use the TagResource
action to tag a resource after it's created.
The tag keys and optional values to add to the resource during create.
Use the TagResource
action to tag a resource after it's created.
The tag keys and optional values to add to the resource during create.
Use the TagResource
action to tag a resource after it's created.
The tag keys and optional values to add to the resource during create.
Use the TagResource
action to tag a resource after it's created.
The tag keys and optional values to add to the resource during create.
Use the TagResource
action to tag a resource after it's created.
The tag keys and optional values for the resource. For more information about tags in Lightsail, see the Lightsail Dev Guide.
", - "DiskSnapshot$tags": "The tag keys and optional values for the resource. For more information about tags in Lightsail, see the Lightsail Dev Guide.
", - "Domain$tags": "The tag keys and optional values for the resource. For more information about tags in Lightsail, see the Lightsail Dev Guide.
", - "Instance$tags": "The tag keys and optional values for the resource. For more information about tags in Lightsail, see the Lightsail Dev Guide.
", - "InstanceSnapshot$tags": "The tag keys and optional values for the resource. For more information about tags in Lightsail, see the Lightsail Dev Guide.
", - "KeyPair$tags": "The tag keys and optional values for the resource. For more information about tags in Lightsail, see the Lightsail Dev Guide.
", - "LightsailDistribution$tags": "The tag keys and optional values for the resource. For more information about tags in Lightsail, see the Lightsail Dev Guide.
", - "LoadBalancer$tags": "The tag keys and optional values for the resource. For more information about tags in Lightsail, see the Lightsail Dev Guide.
", - "LoadBalancerTlsCertificate$tags": "The tag keys and optional values for the resource. For more information about tags in Lightsail, see the Lightsail Dev Guide.
", - "RelationalDatabase$tags": "The tag keys and optional values for the resource. For more information about tags in Lightsail, see the Lightsail Dev Guide.
", - "RelationalDatabaseSnapshot$tags": "The tag keys and optional values for the resource. For more information about tags in Lightsail, see the Lightsail Dev Guide.
", + "Disk$tags": "The tag keys and optional values for the resource. For more information about tags in Lightsail, see the Amazon Lightsail Developer Guide.
", + "DiskSnapshot$tags": "The tag keys and optional values for the resource. For more information about tags in Lightsail, see the Amazon Lightsail Developer Guide.
", + "Domain$tags": "The tag keys and optional values for the resource. For more information about tags in Lightsail, see the Amazon Lightsail Developer Guide.
", + "Instance$tags": "The tag keys and optional values for the resource. For more information about tags in Lightsail, see the Amazon Lightsail Developer Guide.
", + "InstanceSnapshot$tags": "The tag keys and optional values for the resource. For more information about tags in Lightsail, see the Amazon Lightsail Developer Guide.
", + "KeyPair$tags": "The tag keys and optional values for the resource. For more information about tags in Lightsail, see the Amazon Lightsail Developer Guide.
", + "LightsailDistribution$tags": "The tag keys and optional values for the resource. For more information about tags in Lightsail, see the Amazon Lightsail Developer Guide.
", + "LoadBalancer$tags": "The tag keys and optional values for the resource. For more information about tags in Lightsail, see the Amazon Lightsail Developer Guide.
", + "LoadBalancerTlsCertificate$tags": "The tag keys and optional values for the resource. For more information about tags in Lightsail, see the Amazon Lightsail Developer Guide.
", + "RelationalDatabase$tags": "The tag keys and optional values for the resource. For more information about tags in Lightsail, see the Amazon Lightsail Developer Guide.
", + "RelationalDatabaseSnapshot$tags": "The tag keys and optional values for the resource. For more information about tags in Lightsail, see the Amazon Lightsail Developer Guide.
", "TagResourceRequest$tags": "The tag key and optional value.
" } }, @@ -3689,6 +3944,26 @@ "refs": { } }, + "UpdateBucketBundleRequest": { + "base": null, + "refs": { + } + }, + "UpdateBucketBundleResult": { + "base": null, + "refs": { + } + }, + "UpdateBucketRequest": { + "base": null, + "refs": { + } + }, + "UpdateBucketResult": { + "base": null, + "refs": { + } + }, "UpdateContainerServiceRequest": { "base": null, "refs": { @@ -3762,17 +4037,22 @@ "boolean": { "base": null, "refs": { + "AccessRules$allowPublicOverrides": "A Boolean value that indicates whether the access control list (ACL) permissions that are applied to individual objects override the getObject
option that is currently specified.
When this is true, you can use the PutObjectAcl Amazon S3 API action to set individual objects to public (read-only) using the public-read
ACL, or to private using the private
ACL.
Indicates whether the alarm is enabled.
", "Blueprint$isActive": "A Boolean value indicating whether the blueprint is active. Inactive blueprints are listed to support customers with existing instances but are not necessarily available for launch of new instances. Blueprints are marked inactive when they become outdated due to operating system updates or new application releases.
", + "Bucket$ableToUpdateBundle": "Indicates whether the bundle that is currently applied to a bucket can be changed to another bundle.
You can update a bucket's bundle only one time within a monthly AWS billing cycle.
Use the UpdateBucketBundle action to change a bucket's bundle.
", + "BucketBundle$isActive": "Indicates whether the bundle is active. Use for a new or existing bucket.
", "Bundle$isActive": "A Boolean value indicating whether the bundle is active.
", "ContainerService$isDisabled": "A Boolean value indicating whether the container service is disabled.
", "ContainerServicePower$isActive": "A Boolean value indicating whether the power is active and can be specified for container services.
", - "CopySnapshotRequest$useLatestRestorableAutoSnapshot": "A Boolean value to indicate whether to use the latest available automatic snapshot of the specified source instance or disk.
Constraints:
This parameter cannot be defined together with the restore date
parameter. The use latest restorable auto snapshot
and restore date
parameters are mutually exclusive.
Define this parameter only when copying an automatic snapshot as a manual snapshot. For more information, see the Lightsail Dev Guide.
A Boolean value to indicate whether to use the latest available automatic snapshot.
Constraints:
This parameter cannot be defined together with the restore date
parameter. The use latest restorable auto snapshot
and restore date
parameters are mutually exclusive.
Define this parameter only when creating a new disk from an automatic snapshot. For more information, see the Lightsail Dev Guide.
A Boolean value to indicate whether to use the latest available automatic snapshot.
Constraints:
This parameter cannot be defined together with the restore date
parameter. The use latest restorable auto snapshot
and restore date
parameters are mutually exclusive.
Define this parameter only when creating a new instance from an automatic snapshot. For more information, see the Lightsail Dev Guide.
A Boolean value to indicate whether to use the latest available automatic snapshot of the specified source instance or disk.
Constraints:
This parameter cannot be defined together with the restore date
parameter. The use latest restorable auto snapshot
and restore date
parameters are mutually exclusive.
Define this parameter only when copying an automatic snapshot as a manual snapshot. For more information, see the Amazon Lightsail Developer Guide.
A Boolean value that indicates whether to enable versioning of objects in the bucket.
For more information about versioning, see Enabling and suspending bucket object versioning in Amazon Lightsail in the Amazon Lightsail Developer Guide.
", + "CreateDiskFromSnapshotRequest$useLatestRestorableAutoSnapshot": "A Boolean value to indicate whether to use the latest available automatic snapshot.
Constraints:
This parameter cannot be defined together with the restore date
parameter. The use latest restorable auto snapshot
and restore date
parameters are mutually exclusive.
Define this parameter only when creating a new disk from an automatic snapshot. For more information, see the Amazon Lightsail Developer Guide.
A Boolean value to indicate whether to use the latest available automatic snapshot.
Constraints:
This parameter cannot be defined together with the restore date
parameter. The use latest restorable auto snapshot
and restore date
parameters are mutually exclusive.
Define this parameter only when creating a new instance from an automatic snapshot. For more information, see the Amazon Lightsail Developer Guide.
Specifies the accessibility options for your new database. A value of true
specifies a database that is available to resources outside of your Lightsail account. A value of false
specifies a database that is available only to your Lightsail resources in the same region as your database.
Specifies whether your database is restored from the latest backup time. A value of true
restores from the latest backup time.
Default: false
Constraints: Cannot be specified if the restore time
parameter is provided.
Specifies the accessibility options for your new database. A value of true
specifies a database that is available to resources outside of your Lightsail account. A value of false
specifies a database that is available only to your Lightsail resources in the same region as your database.
A Boolean value that indicates whether to force delete the bucket.
You must force delete the bucket if it has one of the following conditions:
The bucket is the origin of a distribution.
The bucket has instances that were granted access to it using the SetResourceAccessForBucket action.
The bucket has objects.
The bucket has access keys.
Force deleting a bucket might impact other resources that rely on the bucket, such as instances, distributions, or software that use the issued access keys.
A Boolean value to indicate whether to delete the enabled add-ons for the disk.
", "DeleteInstanceRequest$forceDeleteAddOns": "A Boolean value to indicate whether to delete the enabled add-ons for the disk.
", "DeleteLoadBalancerTlsCertificateRequest$force": "When true
, forces the deletion of an SSL/TLS certificate.
There can be two certificates associated with a Lightsail load balancer: the primary and the backup. The force
parameter is required when the primary SSL/TLS certificate is in use by an instance attached to the load balancer.
A Boolean value indicating whether the disk is attached.
", "DiskInfo$isSystemDisk": "A Boolean value indicating whether this disk is a system disk (has an operating system loaded on it).
", "DiskSnapshot$isFromAutoSnapshot": "A Boolean value indicating whether the snapshot was created from an automatic snapshot.
", - "DistributionBundle$isActive": "Indicates whether the bundle is active, and can be specified for a new distribution.
", + "DistributionBundle$isActive": "Indicates whether the bundle is active, and can be specified for a new or existing distribution.
", "DomainEntry$isAlias": "When true
, specifies whether the domain entry is an alias used by the Lightsail load balancer. You can include an alias (A type) record in your request, which points to a load balancer DNS name and routes traffic to your load balancer.
A Boolean value indicating whether to include inactive results in your request.
", + "GetBucketBundlesRequest$includeInactive": "A Boolean value that indicates whether to include inactive (unavailable) bundles in the response.
", + "GetBucketsRequest$includeConnectedResources": "A Boolean value that indicates whether to include Lightsail instances that were given access to the bucket using the SetResourceAccessForBucket action.
", "GetBundlesRequest$includeInactive": "A Boolean value that indicates whether to include inactive bundle results in your request.
", "GetRegionsRequest$includeAvailabilityZones": "A Boolean value indicating whether to also include Availability Zones in your get regions request. Availability Zones are indicated with a letter: e.g., us-east-2a
.
A Boolean value indicating whether to also include Availability Zones for databases in your get regions request. Availability Zones are indicated with a letter (e.g., us-east-2a
).
The monthly price of the bundle, in US dollars.
", "Bundle$price": "The price in US dollars (e.g., 5.0
) of the bundle.
The amount of RAM in GB (e.g., 2.0
).
The monthly price of the power in USD.
", @@ -3850,6 +4133,8 @@ "Alarm$datapointsToAlarm": "The number of data points that must not within the specified threshold to trigger the alarm.
", "AttachedDisk$sizeInGb": "The size of the disk in GB.
", "Blueprint$minPower": "The minimum bundle power required to run this blueprint. For example, you need a bundle with a power value of 500 or more to create an instance that uses a blueprint with a minimum power value of 500. 0
indicates that the blueprint runs on all instance sizes.
The storage size of the bundle, in GB.
", + "BucketBundle$transferPerMonthInGb": "The monthly network transfer quota of the bundle.
", "Bundle$cpuCount": "The number of vCPUs included in the bundle (e.g., 2
).
The size of the SSD (e.g., 30
).
A numeric value that represents the power of the bundle (e.g., 500
). You can use the bundle's power value in conjunction with a blueprint's minimum power value to determine whether the blueprint will run on the bundle. For example, you need a bundle with a power value of 500 or more to create an instance that uses a blueprint with a minimum power value of 500.
The version code.
", "Blueprint$productUrl": "The product URL to learn more about the image or blueprint.
", "Blueprint$licenseUrl": "The end-user license agreement URL for the image or blueprint.
", + "BucketState$message": "A message that describes the state of the bucket.
", "Bundle$instanceType": "The Amazon EC2 instance type (e.g., t2.micro
).
A friendly name for the bundle (e.g., Micro
).
The path to a directory or file to cache, or not cache. Use an asterisk symbol to specify wildcard directories (path/to/assets/*
), and file types (*.html, *jpg, *js
). Directories and file paths are case-sensitive.
Examples:
Specify the following to cache all files in the document root of an Apache web server running on a Lightsail instance.
var/www/html/
Specify the following file to cache only the index page in the document root of an Apache web server.
var/www/html/index.html
Specify the following to cache only the .html files in the document root of an Apache web server.
var/www/html/*.html
Specify the following to cache only the .jpg, .png, and .gif files in the images sub-directory of the document root of an Apache web server.
var/www/html/images/*.jpg
var/www/html/images/*.png
var/www/html/images/*.gif
Specify the following to cache all files in the images sub-directory of the document root of an Apache web server.
var/www/html/images/
The publicly accessible URL of the container service.
If no public endpoint is specified in the currentDeployment
, this URL returns a 404 response.
The name of the container entry of the deployment that the endpoint configuration applies to.
", "ContainerServiceHealthCheckConfig$path": "The path on the container on which to perform the health check. The default value is /
.
The HTTP codes to use when checking for a successful response from a container. You can specify values between 200 and 499.
", + "ContainerServiceHealthCheckConfig$successCodes": "The HTTP codes to use when checking for a successful response from a container. You can specify values between 200
and 499
. You can specify multiple values (for example, 200,202
) or a range of values (for example, 200-299
).
The message of the container service log event.
", "ContainerServiceMetadataEntry$key": null, "ContainerServiceMetadataEntry$value": null, @@ -3942,17 +4228,17 @@ "ContainerServiceRegistryLogin$password": "The container service registry password to use to push container images to the container image registry of a Lightsail account
", "ContainerServiceRegistryLogin$registry": "The address to use to push container images to the container image registry of a Lightsail account.
", "ContainerServiceStateDetail$message": "A message that provides more information for the state code.
The state detail is populated only when a container service is in a PENDING
, DEPLOYING
, or UPDATING
state.
The name of the source instance or disk from which the source automatic snapshot was created.
Constraint:
Define this parameter only when copying an automatic snapshot as a manual snapshot. For more information, see the Lightsail Dev Guide.
The date of the source automatic snapshot to copy. Use the get auto snapshots
operation to identify the dates of the available automatic snapshots.
Constraints:
Must be specified in YYYY-MM-DD
format.
This parameter cannot be defined together with the use latest restorable auto snapshot
parameter. The restore date
and use latest restorable auto snapshot
parameters are mutually exclusive.
Define this parameter only when copying an automatic snapshot as a manual snapshot. For more information, see the Lightsail Dev Guide.
The name of the source disk from which the source automatic snapshot was created.
Constraints:
This parameter cannot be defined together with the disk snapshot name
parameter. The source disk name
and disk snapshot name
parameters are mutually exclusive.
Define this parameter only when creating a new disk from an automatic snapshot. For more information, see the Lightsail Dev Guide.
The date of the automatic snapshot to use for the new disk. Use the get auto snapshots
operation to identify the dates of the available automatic snapshots.
Constraints:
Must be specified in YYYY-MM-DD
format.
This parameter cannot be defined together with the use latest restorable auto snapshot
parameter. The restore date
and use latest restorable auto snapshot
parameters are mutually exclusive.
Define this parameter only when creating a new disk from an automatic snapshot. For more information, see the Lightsail Dev Guide.
The name of the source instance or disk from which the source automatic snapshot was created.
Constraint:
Define this parameter only when copying an automatic snapshot as a manual snapshot. For more information, see the Amazon Lightsail Developer Guide.
The date of the source automatic snapshot to copy. Use the get auto snapshots
operation to identify the dates of the available automatic snapshots.
Constraints:
Must be specified in YYYY-MM-DD
format.
This parameter cannot be defined together with the use latest restorable auto snapshot
parameter. The restore date
and use latest restorable auto snapshot
parameters are mutually exclusive.
Define this parameter only when copying an automatic snapshot as a manual snapshot. For more information, see the Amazon Lightsail Developer Guide.
The name of the source disk from which the source automatic snapshot was created.
Constraints:
This parameter cannot be defined together with the disk snapshot name
parameter. The source disk name
and disk snapshot name
parameters are mutually exclusive.
Define this parameter only when creating a new disk from an automatic snapshot. For more information, see the Amazon Lightsail Developer Guide.
The date of the automatic snapshot to use for the new disk. Use the get auto snapshots
operation to identify the dates of the available automatic snapshots.
Constraints:
Must be specified in YYYY-MM-DD
format.
This parameter cannot be defined together with the use latest restorable auto snapshot
parameter. The restore date
and use latest restorable auto snapshot
parameters are mutually exclusive.
Define this parameter only when creating a new disk from an automatic snapshot. For more information, see the Amazon Lightsail Developer Guide.
The bundle ID to use for the distribution.
A distribution bundle describes the specifications of your distribution, such as the monthly cost and monthly network transfer quota.
Use the GetDistributionBundles
action to get a list of distribution bundle IDs that you can specify.
The Availability Zone where you want to create your instances. Use the following formatting: us-east-2a
(case sensitive). You can get a list of Availability Zones by using the get regions operation. Be sure to add the include Availability Zones
parameter to your request.
You can create a launch script that configures a server with additional user data. For example, apt-get -y update
.
Depending on the machine image you choose, the command to get software on your instance varies. Amazon Linux and CentOS use yum
, Debian and Ubuntu use apt-get
, and FreeBSD uses pkg
. For a complete list, see the Dev Guide.
The name of the source instance from which the source automatic snapshot was created.
Constraints:
This parameter cannot be defined together with the instance snapshot name
parameter. The source instance name
and instance snapshot name
parameters are mutually exclusive.
Define this parameter only when creating a new instance from an automatic snapshot. For more information, see the Lightsail Dev Guide.
The date of the automatic snapshot to use for the new instance. Use the get auto snapshots
operation to identify the dates of the available automatic snapshots.
Constraints:
Must be specified in YYYY-MM-DD
format.
This parameter cannot be defined together with the use latest restorable auto snapshot
parameter. The restore date
and use latest restorable auto snapshot
parameters are mutually exclusive.
Define this parameter only when creating a new instance from an automatic snapshot. For more information, see the Lightsail Dev Guide.
You can create a launch script that configures a server with additional user data. For example, apt-get -y update
.
Depending on the machine image you choose, the command to get software on your instance varies. Amazon Linux and CentOS use yum
, Debian and Ubuntu use apt-get
, and FreeBSD uses pkg
. For a complete list, see the Amazon Lightsail Developer Guide.
The name of the source instance from which the source automatic snapshot was created.
Constraints:
This parameter cannot be defined together with the instance snapshot name
parameter. The source instance name
and instance snapshot name
parameters are mutually exclusive.
Define this parameter only when creating a new instance from an automatic snapshot. For more information, see the Amazon Lightsail Developer Guide.
The date of the automatic snapshot to use for the new instance. Use the get auto snapshots
operation to identify the dates of the available automatic snapshots.
Constraints:
Must be specified in YYYY-MM-DD
format.
This parameter cannot be defined together with the use latest restorable auto snapshot
parameter. The restore date
and use latest restorable auto snapshot
parameters are mutually exclusive.
Define this parameter only when creating a new instance from an automatic snapshot. For more information, see the Amazon Lightsail Developer Guide.
The Availability Zone in which to create your instance. Use the following format: us-east-2a
(case sensitive). You can get a list of Availability Zones by using the get regions operation. Be sure to add the include Availability Zones
parameter to your request.
A launch script you can create that configures a server with additional user data. For example, you might want to run apt-get -y update
.
Depending on the machine image you choose, the command to get software on your instance varies. Amazon Linux and CentOS use yum
, Debian and Ubuntu use apt-get
, and FreeBSD uses pkg
. For a complete list, see the Dev Guide.
A launch script you can create that configures a server with additional user data. For example, you might want to run apt-get -y update
.
Depending on the machine image you choose, the command to get software on your instance varies. Amazon Linux and CentOS use yum
, Debian and Ubuntu use apt-get
, and FreeBSD uses pkg
. For a complete list, see the Amazon Lightsail Developer Guide.
The path you provided to perform the load balancer health check. If you didn't specify a health check path, Lightsail uses the root path of your website (e.g., \"/\"
).
You may want to specify a custom health check path other than the root of your application if your home page loads slowly or has a lot of media or scripting on it.
", "CreateRelationalDatabaseFromSnapshotRequest$availabilityZone": "The Availability Zone in which to create your new database. Use the us-east-2a
case-sensitive format.
You can get a list of Availability Zones by using the get regions
operation. Be sure to add the include relational database Availability Zones
parameter to your request.
The bundle ID for your new database. A bundle describes the performance specifications for your database.
You can get a list of database bundle IDs by using the get relational database bundles
operation.
When creating a new database from a snapshot, you cannot choose a bundle that is smaller than the bundle of the source database.
", @@ -3984,6 +4270,8 @@ "GetAlarmsResult$nextPageToken": "The token to advance to the next page of results from your request.
A next page token is not returned if there are no more results to display.
To get the next page of results, perform another GetAlarms
request and specify the next page token using the pageToken
parameter.
The token to advance to the next page of results from your request.
To get a page token, perform an initial GetBlueprints
request. If your results are paginated, the response will return a next page token that you can specify as the page token in a subsequent request.
The token to advance to the next page of results from your request.
A next page token is not returned if there are no more results to display.
To get the next page of results, perform another GetBlueprints
request and specify the next page token using the pageToken
parameter.
The token to advance to the next page of results from your request.
To get a page token, perform an initial GetBuckets
request. If your results are paginated, the response will return a next page token that you can specify as the page token in a subsequent request.
The token to advance to the next page of results from your request.
A next page token is not returned if there are no more results to display.
To get the next page of results, perform another GetBuckets
request and specify the next page token using the pageToken
parameter.
The token to advance to the next page of results from your request.
To get a page token, perform an initial GetBundles
request. If your results are paginated, the response will return a next page token that you can specify as the page token in a subsequent request.
The token to advance to the next page of results from your request.
A next page token is not returned if there are no more results to display.
To get the next page of results, perform another GetBundles
request and specify the next page token using the pageToken
parameter.
The token to advance to the next page of results from your request.
To get a page token, perform an initial GetCloudFormationStackRecords
request. If your results are paginated, the response will return a next page token that you can specify as the page token in a subsequent request.
Delete an existing workload.
", "DeleteWorkloadShare": "Delete a workload share.
", "DisassociateLenses": "Disassociate a lens from a workload.
The AWS Well-Architected Framework lens (wellarchitected
) cannot be removed from a workload.
Get lens review.
", + "GetAnswer": "Get the answer to a specific question in a workload review.
", "GetLensReview": "Get lens review.
", "GetLensReviewReport": "Get lens review report.
", "GetLensVersionDifference": "Get lens version differences.
", @@ -26,7 +26,7 @@ "ListWorkloadShares": "List the workload shares associated with the workload.
", "ListWorkloads": "List workloads. Paginated.
", "TagResource": "Adds one or more tags to the specified resource.
", - "UntagResource": "Deletes specified tags from a resource.
", + "UntagResource": "Deletes specified tags from a resource.
To specify multiple tags, use separate tagKeys parameters, for example:
DELETE /tags/WorkloadArn?tagKeys=key1&tagKeys=key2
Update the answer to a specific question in a workload review.
", "UpdateLensReview": "Update lens review.
", "UpdateShareInvitation": "Update a workload invitation.
", @@ -47,6 +47,14 @@ "UpdateAnswerOutput$Answer": null } }, + "AnswerReason": { + "base": null, + "refs": { + "Answer$Reason": "The reason why the question is not applicable to your workload.
", + "AnswerSummary$Reason": "The reason why a choice is non-applicable to a question in your workload.
", + "UpdateAnswerInput$Reason": "The reason why a question is not applicable to your workload.
" + } + }, "AnswerSummaries": { "base": "List of answer summaries of lens review in a workload.
", "refs": { @@ -92,6 +100,30 @@ "Choices$member": null } }, + "ChoiceAnswer": { + "base": "A choice that has been answered on a question in your workload.
", + "refs": { + "ChoiceAnswers$member": null + } + }, + "ChoiceAnswerSummaries": { + "base": null, + "refs": { + "AnswerSummary$ChoiceAnswerSummaries": "A list of selected choices to a question in your workload.
" + } + }, + "ChoiceAnswerSummary": { + "base": "A choice summary that has been answered on a question in your workload.
", + "refs": { + "ChoiceAnswerSummaries$member": null + } + }, + "ChoiceAnswers": { + "base": null, + "refs": { + "Answer$ChoiceAnswers": "A list of selected choices to a question in your workload.
" + } + }, "ChoiceDescription": { "base": "The description of a choice.
", "refs": { @@ -102,15 +134,53 @@ "base": "The ID of a choice.
", "refs": { "Choice$ChoiceId": null, + "ChoiceAnswer$ChoiceId": null, + "ChoiceAnswerSummary$ChoiceId": null, + "ChoiceUpdates$key": null, "SelectedChoices$member": null } }, + "ChoiceNotes": { + "base": null, + "refs": { + "ChoiceAnswer$Notes": "The notes associated with a choice.
", + "ChoiceUpdate$Notes": "The notes associated with a choice.
" + } + }, + "ChoiceReason": { + "base": null, + "refs": { + "ChoiceAnswer$Reason": "The reason why a choice is non-applicable to a question in your workload.
", + "ChoiceAnswerSummary$Reason": "The reason why a choice is non-applicable to a question in your workload.
", + "ChoiceUpdate$Reason": "The reason why a choice is non-applicable to a question in your workload.
" + } + }, + "ChoiceStatus": { + "base": null, + "refs": { + "ChoiceAnswer$Status": "The status of a choice.
", + "ChoiceAnswerSummary$Status": "The status of a choice.
", + "ChoiceUpdate$Status": "The status of a choice.
" + } + }, "ChoiceTitle": { "base": "The title of a choice.
", "refs": { "Choice$Title": null } }, + "ChoiceUpdate": { + "base": "A list of choices to be updated.
", + "refs": { + "ChoiceUpdates$value": null + } + }, + "ChoiceUpdates": { + "base": null, + "refs": { + "UpdateAnswerInput$ChoiceUpdates": "A list of choices to update on a question in your workload. The String key corresponds to the choice ID to be updated.
" + } + }, "Choices": { "base": "List of choices available for a question.
", "refs": { @@ -288,7 +358,7 @@ } }, "ImprovementPlanUrl": { - "base": "The improvement plan URL for a question.
", + "base": "The improvement plan URL for a question.
This value is only available if the question has been answered.
", "refs": { "Answer$ImprovementPlanUrl": null, "ImprovementSummary$ImprovementPlanUrl": null @@ -908,7 +978,7 @@ "TagKeyList": { "base": null, "refs": { - "UntagResourceInput$TagKeys": "The keys of the tags to be removed.
" + "UntagResourceInput$TagKeys": "A list of tag keys. Existing tags of the resource whose keys are members of this list are removed from the resource.
" } }, "TagMap": { diff --git a/service/acm/api.go b/service/acm/api.go index 99c4e3ef81c..efae96c9114 100644 --- a/service/acm/api.go +++ b/service/acm/api.go @@ -59,9 +59,10 @@ func (c *ACM) AddTagsToCertificateRequest(input *AddTagsToCertificateInput) (req // AddTagsToCertificate API operation for AWS Certificate Manager. // // Adds one or more tags to an ACM certificate. Tags are labels that you can -// use to identify and organize your AWS resources. Each tag consists of a key -// and an optional value. You specify the certificate on input by its Amazon -// Resource Name (ARN). You specify the tag by using a key-value pair. +// use to identify and organize your Amazon Web Services resources. Each tag +// consists of a key and an optional value. You specify the certificate on input +// by its Amazon Resource Name (ARN). You specify the tag by using a key-value +// pair. // // You can apply a tag to just one certificate if you want to identify a specific // characteristic of that certificate, or you can apply the same tag to multiple @@ -177,12 +178,12 @@ func (c *ACM) DeleteCertificateRequest(input *DeleteCertificateInput) (req *requ // Deletes a certificate and its associated private key. If this action succeeds, // the certificate no longer appears in the list that can be displayed by calling // the ListCertificates action or be retrieved by calling the GetCertificate -// action. The certificate will not be available for use by AWS services integrated -// with ACM. +// action. The certificate will not be available for use by Amazon Web Services +// services integrated with ACM. // -// You cannot delete an ACM certificate that is being used by another AWS service. -// To delete a certificate that is in use, the certificate association must -// first be removed. +// You cannot delete an ACM certificate that is being used by another Amazon +// Web Services service. To delete a certificate that is in use, the certificate +// association must first be removed. 
// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -197,8 +198,8 @@ func (c *ACM) DeleteCertificateRequest(input *DeleteCertificateInput) (req *requ // caller's account cannot be found. // // * ResourceInUseException -// The certificate is in use by another AWS service in the caller's account. -// Remove the association and try again. +// The certificate is in use by another Amazon Web Services service in the caller's +// account. Remove the association and try again. // // * InvalidArnException // The requested Amazon Resource Name (ARN) does not refer to an existing resource. @@ -446,7 +447,8 @@ func (c *ACM) GetAccountConfigurationRequest(input *GetAccountConfigurationInput // GetAccountConfiguration API operation for AWS Certificate Manager. // -// Returns the account configuration options associated with an AWS account. +// Returns the account configuration options associated with an Amazon Web Services +// account. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -619,14 +621,15 @@ func (c *ACM) ImportCertificateRequest(input *ImportCertificateInput) (req *requ // ImportCertificate API operation for AWS Certificate Manager. // -// Imports a certificate into AWS Certificate Manager (ACM) to use with services -// that are integrated with ACM. Note that integrated services (https://docs.aws.amazon.com/acm/latest/userguide/acm-services.html) -// allow only certificate types and keys they support to be associated with -// their resources. Further, their support differs depending on whether the -// certificate is imported into IAM or into ACM. For more information, see the -// documentation for each service. 
For more information about importing certificates -// into ACM, see Importing Certificates (https://docs.aws.amazon.com/acm/latest/userguide/import-certificate.html) -// in the AWS Certificate Manager User Guide. +// Imports a certificate into Amazon Web Services Certificate Manager (ACM) +// to use with services that are integrated with ACM. Note that integrated services +// (https://docs.aws.amazon.com/acm/latest/userguide/acm-services.html) allow +// only certificate types and keys they support to be associated with their +// resources. Further, their support differs depending on whether the certificate +// is imported into IAM or into ACM. For more information, see the documentation +// for each service. For more information about importing certificates into +// ACM, see Importing Certificates (https://docs.aws.amazon.com/acm/latest/userguide/import-certificate.html) +// in the Amazon Web Services Certificate Manager User Guide. // // ACM does not provide managed renewal (https://docs.aws.amazon.com/acm/latest/userguide/acm-renewal.html) // for certificates that you import. @@ -1018,7 +1021,8 @@ func (c *ACM) PutAccountConfigurationRequest(input *PutAccountConfigurationInput // // Returned Error Types: // * ValidationException -// The supplied input failed to satisfy constraints of an AWS service. +// The supplied input failed to satisfy constraints of an Amazon Web Services +// service. // // * ThrottlingException // The request was denied because it exceeded a quota. @@ -1289,10 +1293,10 @@ func (c *ACM) RequestCertificateRequest(input *RequestCertificateInput) (req *re // RequestCertificate API operation for AWS Certificate Manager. // -// Requests an ACM certificate for use with other AWS services. To request an -// ACM certificate, you must specify a fully qualified domain name (FQDN) in -// the DomainName parameter. You can also specify additional FQDNs in the SubjectAlternativeNames -// parameter. 
+// Requests an ACM certificate for use with other Amazon Web Services services. +// To request an ACM certificate, you must specify a fully qualified domain +// name (FQDN) in the DomainName parameter. You can also specify additional +// FQDNs in the SubjectAlternativeNames parameter. // // If you are requesting a private certificate, domain validation is not required. // If you are requesting a public certificate, each domain name that you specify @@ -1302,6 +1306,11 @@ func (c *ACM) RequestCertificateRequest(input *RequestCertificateInput) (req *re // We recommend that you use DNS validation. ACM issues public certificates // after receiving approval from the domain owner. // +// ACM behavior differs from the RFC 6125 (https://tools.ietf.org/html/rfc6125#appendix-B.2) +// specification of the certificate validation process. ACM first checks for +// a subject alternative name, and, if it finds one, ignores the common name +// (CN). +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -1697,7 +1706,7 @@ type CertificateDetail struct { // The Amazon Resource Name (ARN) of the certificate. For more information about // ARNs, see Amazon Resource Names (ARNs) (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) - // in the AWS General Reference. + // in the Amazon Web Services General Reference. CertificateArn *string `min:"20" type:"string"` // The Amazon Resource Name (ARN) of the ACM PCA private certificate authority @@ -1726,15 +1735,15 @@ type CertificateDetail struct { // The reason the certificate request failed. This value exists only when the // certificate status is FAILED. 
For more information, see Certificate Request // Failed (https://docs.aws.amazon.com/acm/latest/userguide/troubleshooting.html#troubleshooting-failed) - // in the AWS Certificate Manager User Guide. + // in the Amazon Web Services Certificate Manager User Guide. FailureReason *string `type:"string" enum:"FailureReason"` // The date and time at which the certificate was imported. This value exists // only when the certificate type is IMPORTED. ImportedAt *time.Time `type:"timestamp"` - // A list of ARNs for the AWS resources that are using the certificate. A certificate - // can be used by multiple AWS resources. + // A list of ARNs for the Amazon Web Services resources that are using the certificate. + // A certificate can be used by multiple Amazon Web Services resources. InUseBy []*string `type:"list"` // The time at which the certificate was issued. This value exists only when @@ -1810,7 +1819,7 @@ type CertificateDetail struct { // for imported certificates. For more information about the differences between // certificates that you import and those that ACM provides, see Importing Certificates // (https://docs.aws.amazon.com/acm/latest/userguide/import-certificate.html) - // in the AWS Certificate Manager User Guide. + // in the Amazon Web Services Certificate Manager User Guide. Type *string `type:"string" enum:"CertificateType"` } @@ -2395,7 +2404,8 @@ func (s *DomainValidationOption) SetValidationDomain(v string) *DomainValidation return s } -// Object containing expiration events options associated with an AWS account. +// Object containing expiration events options associated with an Amazon Web +// Services account. type ExpiryEventsConfiguration struct { _ struct{} `type:"structure"` @@ -2665,7 +2675,8 @@ func (s GetAccountConfigurationInput) GoString() string { type GetAccountConfigurationOutput struct { _ struct{} `type:"structure"` - // Expiration events configuration options associated with the AWS account. 
+ // Expiration events configuration options associated with the Amazon Web Services + // account. ExpiryEvents *ExpiryEventsConfiguration `type:"structure"` } @@ -3784,8 +3795,9 @@ type RequestCertificateInput struct { // that will be used to issue the certificate. If you do not provide an ARN // and you are trying to request a private certificate, ACM will attempt to // issue a public certificate. For more information about private CAs, see the - // AWS Certificate Manager Private Certificate Authority (PCA) (https://docs.aws.amazon.com/acm-pca/latest/userguide/PcaWelcome.html) - // user guide. The ARN must have the following form: + // Amazon Web Services Certificate Manager Private Certificate Authority (PCA) + // (https://docs.aws.amazon.com/acm-pca/latest/userguide/PcaWelcome.html) user + // guide. The ARN must have the following form: // // arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012 CertificateAuthorityArn *string `min:"20" type:"string"` @@ -4159,8 +4171,8 @@ func (s ResendValidationEmailOutput) GoString() string { return s.String() } -// The certificate is in use by another AWS service in the caller's account. -// Remove the association and try again. +// The certificate is in use by another Amazon Web Services service in the caller's +// account. Remove the association and try again. type ResourceInUseException struct { _ struct{} `type:"structure"` RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` @@ -4273,8 +4285,8 @@ func (s *ResourceNotFoundException) RequestID() string { return s.RespMetadata.RequestID } -// Contains a DNS record value that you can use to can use to validate ownership -// or control of a domain. This is used by the DescribeCertificate action. +// Contains a DNS record value that you can use to validate ownership or control +// of a domain. This is used by the DescribeCertificate action. 
type ResourceRecord struct { _ struct{} `type:"structure"` @@ -4618,7 +4630,8 @@ func (s UpdateCertificateOptionsOutput) GoString() string { return s.String() } -// The supplied input failed to satisfy constraints of an AWS service. +// The supplied input failed to satisfy constraints of an Amazon Web Services +// service. type ValidationException struct { _ struct{} `type:"structure"` RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` @@ -4899,11 +4912,14 @@ func FailureReason_Values() []string { } const ( + // KeyAlgorithmRsa1024 is a KeyAlgorithm enum value + KeyAlgorithmRsa1024 = "RSA_1024" + // KeyAlgorithmRsa2048 is a KeyAlgorithm enum value KeyAlgorithmRsa2048 = "RSA_2048" - // KeyAlgorithmRsa1024 is a KeyAlgorithm enum value - KeyAlgorithmRsa1024 = "RSA_1024" + // KeyAlgorithmRsa3072 is a KeyAlgorithm enum value + KeyAlgorithmRsa3072 = "RSA_3072" // KeyAlgorithmRsa4096 is a KeyAlgorithm enum value KeyAlgorithmRsa4096 = "RSA_4096" @@ -4921,8 +4937,9 @@ const ( // KeyAlgorithm_Values returns all elements of the KeyAlgorithm enum func KeyAlgorithm_Values() []string { return []string{ - KeyAlgorithmRsa2048, KeyAlgorithmRsa1024, + KeyAlgorithmRsa2048, + KeyAlgorithmRsa3072, KeyAlgorithmRsa4096, KeyAlgorithmEcPrime256v1, KeyAlgorithmEcSecp384r1, diff --git a/service/acm/doc.go b/service/acm/doc.go index 64662de3d22..c53bf06d634 100644 --- a/service/acm/doc.go +++ b/service/acm/doc.go @@ -3,9 +3,10 @@ // Package acm provides the client and types for making API // requests to AWS Certificate Manager. // -// You can use AWS Certificate Manager (ACM) to manage SSL/TLS certificates -// for your AWS-based websites and applications. For more information about -// using ACM, see the AWS Certificate Manager User Guide (https://docs.aws.amazon.com/acm/latest/userguide/). +// You can use Amazon Web Services Certificate Manager (ACM) to manage SSL/TLS +// certificates for your Amazon Web Services-based websites and applications. 
+// For more information about using ACM, see the Amazon Web Services Certificate +// Manager User Guide (https://docs.aws.amazon.com/acm/latest/userguide/). // // See https://docs.aws.amazon.com/goto/WebAPI/acm-2015-12-08 for more information on this service. // diff --git a/service/acm/errors.go b/service/acm/errors.go index ffc3e8c72dd..fd8c9ff1dd6 100644 --- a/service/acm/errors.go +++ b/service/acm/errors.go @@ -74,8 +74,8 @@ const ( // ErrCodeResourceInUseException for service response error code // "ResourceInUseException". // - // The certificate is in use by another AWS service in the caller's account. - // Remove the association and try again. + // The certificate is in use by another Amazon Web Services service in the caller's + // account. Remove the association and try again. ErrCodeResourceInUseException = "ResourceInUseException" // ErrCodeResourceNotFoundException for service response error code @@ -106,7 +106,8 @@ const ( // ErrCodeValidationException for service response error code // "ValidationException". // - // The supplied input failed to satisfy constraints of an AWS service. + // The supplied input failed to satisfy constraints of an Amazon Web Services + // service. ErrCodeValidationException = "ValidationException" ) diff --git a/service/databasemigrationservice/api.go b/service/databasemigrationservice/api.go index 0d44c0be1f0..abf7328071e 100644 --- a/service/databasemigrationservice/api.go +++ b/service/databasemigrationservice/api.go @@ -58,10 +58,10 @@ func (c *DatabaseMigrationService) AddTagsToResourceRequest(input *AddTagsToReso // AddTagsToResource API operation for AWS Database Migration Service. // -// Adds metadata tags to an AWS DMS resource, including replication instance, -// endpoint, security group, and migration task. These tags can also be used -// with cost allocation reporting to track cost associated with DMS resources, -// or used in a Condition statement in an IAM policy for DMS. 
For more information, +// Adds metadata tags to an DMS resource, including replication instance, endpoint, +// security group, and migration task. These tags can also be used with cost +// allocation reporting to track cost associated with DMS resources, or used +// in a Condition statement in an IAM policy for DMS. For more information, // see Tag (https://docs.aws.amazon.com/dms/latest/APIReference/API_Tag.html) // data type description. // @@ -237,8 +237,7 @@ func (c *DatabaseMigrationService) CancelReplicationTaskAssessmentRunRequest(inp // // Returned Error Types: // * AccessDeniedFault -// AWS DMS was denied access to the endpoint. Check that the role is correctly -// configured. +// DMS was denied access to the endpoint. Check that the role is correctly configured. // // * ResourceNotFoundFault // The resource could not be found. @@ -315,6 +314,13 @@ func (c *DatabaseMigrationService) CreateEndpointRequest(input *CreateEndpointIn // // Creates an endpoint using the provided settings. // +// For a MySQL source or target endpoint, don't explicitly specify the database +// using the DatabaseName request parameter on the CreateEndpoint API call. +// Specifying DatabaseName when you create a MySQL endpoint replicates all the +// task tables to this single database. For MySQL endpoints, you specify the +// database only when you specify the schema in the table-mapping rules of the +// DMS task. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -324,7 +330,7 @@ func (c *DatabaseMigrationService) CreateEndpointRequest(input *CreateEndpointIn // // Returned Error Types: // * KMSKeyNotAccessibleFault -// AWS DMS cannot access the AWS KMS key. +// DMS cannot access the KMS key. // // * ResourceAlreadyExistsFault // The resource you are attempting to create already exists. 
@@ -340,8 +346,7 @@ func (c *DatabaseMigrationService) CreateEndpointRequest(input *CreateEndpointIn // The resource could not be found. // // * AccessDeniedFault -// AWS DMS was denied access to the endpoint. Check that the role is correctly -// configured. +// DMS was denied access to the endpoint. Check that the role is correctly configured. // // * S3AccessDeniedFault // Insufficient privileges are preventing access to an Amazon S3 object. @@ -412,23 +417,23 @@ func (c *DatabaseMigrationService) CreateEventSubscriptionRequest(input *CreateE // CreateEventSubscription API operation for AWS Database Migration Service. // -// Creates an AWS DMS event notification subscription. +// Creates an DMS event notification subscription. // // You can specify the type of source (SourceType) you want to be notified of, -// provide a list of AWS DMS source IDs (SourceIds) that triggers the events, -// and provide a list of event categories (EventCategories) for events you want +// provide a list of DMS source IDs (SourceIds) that triggers the events, and +// provide a list of event categories (EventCategories) for events you want // to be notified of. If you specify both the SourceType and SourceIds, such // as SourceType = replication-instance and SourceIdentifier = my-replinstance, // you will be notified of all the replication instance events for the specified // source. If you specify a SourceType but don't specify a SourceIdentifier, -// you receive notice of the events for that source type for all your AWS DMS -// sources. If you don't specify either SourceType nor SourceIdentifier, you -// will be notified of events generated from all AWS DMS sources belonging to -// your customer account. +// you receive notice of the events for that source type for all your DMS sources. +// If you don't specify either SourceType nor SourceIdentifier, you will be +// notified of events generated from all DMS sources belonging to your customer +// account. 
// -// For more information about AWS DMS events, see Working with Events and Notifications +// For more information about DMS events, see Working with Events and Notifications // (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Events.html) in the -// AWS Database Migration Service User Guide. +// Database Migration Service User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -461,13 +466,13 @@ func (c *DatabaseMigrationService) CreateEventSubscriptionRequest(input *CreateE // The specified master key (CMK) isn't enabled. // // * KMSInvalidStateFault -// The state of the specified AWS KMS resource isn't valid for this request. +// The state of the specified KMS resource isn't valid for this request. // // * KMSNotFoundFault -// The specified AWS KMS entity or resource can't be found. +// The specified KMS entity or resource can't be found. // // * KMSThrottlingFault -// This request triggered AWS KMS request throttling. +// This request triggered KMS request throttling. // // See also, https://docs.aws.amazon.com/goto/WebAPI/dms-2016-01-01/CreateEventSubscription func (c *DatabaseMigrationService) CreateEventSubscription(input *CreateEventSubscriptionInput) (*CreateEventSubscriptionOutput, error) { @@ -537,12 +542,11 @@ func (c *DatabaseMigrationService) CreateReplicationInstanceRequest(input *Creat // // Creates the replication instance using the specified parameters. // -// AWS DMS requires that your account have certain roles with appropriate permissions +// DMS requires that your account have certain roles with appropriate permissions // before you can create a replication instance. For information on the required -// roles, see Creating the IAM Roles to Use With the AWS CLI and AWS DMS API -// (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Security.html#CHAP_Security.APIRole). 
+// roles, see Creating the IAM Roles to Use With the CLI and DMS API (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Security.html#CHAP_Security.APIRole). // For information on the required permissions, see IAM Permissions Needed to -// Use AWS DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Security.html#CHAP_Security.IAMPermissions). +// Use DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Security.html#CHAP_Security.IAMPermissions). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -553,8 +557,7 @@ func (c *DatabaseMigrationService) CreateReplicationInstanceRequest(input *Creat // // Returned Error Types: // * AccessDeniedFault -// AWS DMS was denied access to the endpoint. Check that the role is correctly -// configured. +// DMS was denied access to the endpoint. Check that the role is correctly configured. // // * ResourceAlreadyExistsFault // The resource you are attempting to create already exists. @@ -583,7 +586,7 @@ func (c *DatabaseMigrationService) CreateReplicationInstanceRequest(input *Creat // The subnet provided is invalid. // // * KMSKeyNotAccessibleFault -// AWS DMS cannot access the AWS KMS key. +// DMS cannot access the KMS key. // // See also, https://docs.aws.amazon.com/goto/WebAPI/dms-2016-01-01/CreateReplicationInstance func (c *DatabaseMigrationService) CreateReplicationInstance(input *CreateReplicationInstanceInput) (*CreateReplicationInstanceOutput, error) { @@ -662,8 +665,7 @@ func (c *DatabaseMigrationService) CreateReplicationSubnetGroupRequest(input *Cr // // Returned Error Types: // * AccessDeniedFault -// AWS DMS was denied access to the endpoint. Check that the role is correctly -// configured. +// DMS was denied access to the endpoint. Check that the role is correctly configured. // // * ResourceAlreadyExistsFault // The resource you are attempting to create already exists. 
@@ -758,8 +760,7 @@ func (c *DatabaseMigrationService) CreateReplicationTaskRequest(input *CreateRep // // Returned Error Types: // * AccessDeniedFault -// AWS DMS was denied access to the endpoint. Check that the role is correctly -// configured. +// DMS was denied access to the endpoint. Check that the role is correctly configured. // // * InvalidResourceStateFault // The resource is in a state that prevents it from being used for database @@ -772,7 +773,7 @@ func (c *DatabaseMigrationService) CreateReplicationTaskRequest(input *CreateRep // The resource could not be found. // // * KMSKeyNotAccessibleFault -// AWS DMS cannot access the AWS KMS key. +// DMS cannot access the KMS key. // // * ResourceQuotaExceededFault // The quota for this resource quota has been exceeded. @@ -937,8 +938,7 @@ func (c *DatabaseMigrationService) DeleteConnectionRequest(input *DeleteConnecti // // Returned Error Types: // * AccessDeniedFault -// AWS DMS was denied access to the endpoint. Check that the role is correctly -// configured. +// DMS was denied access to the endpoint. Check that the role is correctly configured. // // * ResourceNotFoundFault // The resource could not be found. @@ -1099,7 +1099,7 @@ func (c *DatabaseMigrationService) DeleteEventSubscriptionRequest(input *DeleteE // DeleteEventSubscription API operation for AWS Database Migration Service. // -// Deletes an AWS DMS event subscription. +// Deletes an DMS event subscription. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1437,7 +1437,7 @@ func (c *DatabaseMigrationService) DeleteReplicationTaskAssessmentRunRequest(inp // // Deletes the record of a single premigration assessment run. // -// This operation removes all metadata that AWS DMS maintains about this assessment +// This operation removes all metadata that DMS maintains about this assessment // run. 
However, the operation leaves untouched all information about this assessment // run that is stored in your Amazon S3 bucket. // @@ -1450,8 +1450,7 @@ func (c *DatabaseMigrationService) DeleteReplicationTaskAssessmentRunRequest(inp // // Returned Error Types: // * AccessDeniedFault -// AWS DMS was denied access to the endpoint. Check that the role is correctly -// configured. +// DMS was denied access to the endpoint. Check that the role is correctly configured. // // * ResourceNotFoundFault // The resource could not be found. @@ -1526,13 +1525,13 @@ func (c *DatabaseMigrationService) DescribeAccountAttributesRequest(input *Descr // DescribeAccountAttributes API operation for AWS Database Migration Service. // -// Lists all of the AWS DMS attributes for a customer account. These attributes -// include AWS DMS quotas for the account and a unique account identifier in -// a particular DMS region. DMS quotas include a list of resource quotas supported -// by the account, such as the number of replication instances allowed. The -// description for each resource quota, includes the quota name, current usage -// toward that quota, and the quota's maximum value. DMS uses the unique account -// identifier to name each artifact used by DMS in the given region. +// Lists all of the DMS attributes for a customer account. These attributes +// include DMS quotas for the account and a unique account identifier in a particular +// DMS region. DMS quotas include a list of resource quotas supported by the +// account, such as the number of replication instances allowed. The description +// for each resource quota, includes the quota name, current usage toward that +// quota, and the quota's maximum value. DMS uses the unique account identifier +// to name each artifact used by DMS in the given region. // // This command does not take any parameters. 
// @@ -1645,8 +1644,7 @@ func (c *DatabaseMigrationService) DescribeApplicableIndividualAssessmentsReques // // Returned Error Types: // * AccessDeniedFault -// AWS DMS was denied access to the endpoint. Check that the role is correctly -// configured. +// DMS was denied access to the endpoint. Check that the role is correctly configured. // // * ResourceNotFoundFault // The resource could not be found. @@ -2453,7 +2451,7 @@ func (c *DatabaseMigrationService) DescribeEventCategoriesRequest(input *Describ // Lists categories for all event source types, or, if specified, for a specified // source type. You can see a list of the event categories and source types // in Working with Events and Notifications (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Events.html) -// in the AWS Database Migration Service User Guide. +// in the Database Migration Service User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2676,9 +2674,9 @@ func (c *DatabaseMigrationService) DescribeEventsRequest(input *DescribeEventsIn // DescribeEvents API operation for AWS Database Migration Service. // // Lists events for a given source identifier and source type. You can also -// specify a start and end time. For more information on AWS DMS events, see -// Working with Events and Notifications (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Events.html) -// in the AWS Database Migration User Guide. +// specify a start and end time. For more information on DMS events, see Working +// with Events and Notifications (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Events.html) +// in the Database Migration Service User Guide. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3579,8 +3577,12 @@ func (c *DatabaseMigrationService) DescribeReplicationTaskAssessmentResultsReque // DescribeReplicationTaskAssessmentResults API operation for AWS Database Migration Service. // -// Returns the task assessment results from Amazon S3. This action always returns -// the latest results. +// Returns the task assessment results from the Amazon S3 bucket that DMS creates +// in your account. This action always returns the latest results. +// +// For more information about DMS task assessments, see Creating a task assessment +// report (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Tasks.AssessmentReport.html) +// in the Database Migration Service User Guide (https://docs.aws.amazon.com/dms/latest/userguide/Welcome.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4284,8 +4286,8 @@ func (c *DatabaseMigrationService) DescribeTableStatisticsRequest(input *Describ // name, rows inserted, rows updated, and rows deleted. // // Note that the "last updated" column the DMS console only indicates the time -// that AWS DMS last updated the table statistics record for a table. It does -// not indicate the time of the last update to the table. +// that DMS last updated the table statistics record for a table. It does not +// indicate the time of the last update to the table. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4505,7 +4507,7 @@ func (c *DatabaseMigrationService) ListTagsForResourceRequest(input *ListTagsFor // ListTagsForResource API operation for AWS Database Migration Service. 
// -// Lists all metadata tags attached to an AWS DMS resource, including replication +// Lists all metadata tags attached to an DMS resource, including replication // instance, endpoint, security group, and migration task. For more information, // see Tag (https://docs.aws.amazon.com/dms/latest/APIReference/API_Tag.html) // data type description. @@ -4589,6 +4591,13 @@ func (c *DatabaseMigrationService) ModifyEndpointRequest(input *ModifyEndpointIn // // Modifies the specified endpoint. // +// For a MySQL source or target endpoint, don't explicitly specify the database +// using the DatabaseName request parameter on the ModifyEndpoint API call. +// Specifying DatabaseName when you modify a MySQL endpoint replicates all the +// task tables to this single database. For MySQL endpoints, you specify the +// database only when you specify the schema in the table-mapping rules of the +// DMS task. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -4608,11 +4617,10 @@ func (c *DatabaseMigrationService) ModifyEndpointRequest(input *ModifyEndpointIn // The resource you are attempting to create already exists. // // * KMSKeyNotAccessibleFault -// AWS DMS cannot access the AWS KMS key. +// DMS cannot access the KMS key. // // * AccessDeniedFault -// AWS DMS was denied access to the endpoint. Check that the role is correctly -// configured. +// DMS was denied access to the endpoint. Check that the role is correctly configured. // // See also, https://docs.aws.amazon.com/goto/WebAPI/dms-2016-01-01/ModifyEndpoint func (c *DatabaseMigrationService) ModifyEndpoint(input *ModifyEndpointInput) (*ModifyEndpointOutput, error) { @@ -4680,7 +4688,7 @@ func (c *DatabaseMigrationService) ModifyEventSubscriptionRequest(input *ModifyE // ModifyEventSubscription API operation for AWS Database Migration Service. 
// -// Modifies an existing AWS DMS event notification subscription. +// Modifies an existing DMS event notification subscription. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4710,13 +4718,13 @@ func (c *DatabaseMigrationService) ModifyEventSubscriptionRequest(input *ModifyE // The specified master key (CMK) isn't enabled. // // * KMSInvalidStateFault -// The state of the specified AWS KMS resource isn't valid for this request. +// The state of the specified KMS resource isn't valid for this request. // // * KMSNotFoundFault -// The specified AWS KMS entity or resource can't be found. +// The specified KMS entity or resource can't be found. // // * KMSThrottlingFault -// This request triggered AWS KMS request throttling. +// This request triggered KMS request throttling. // // See also, https://docs.aws.amazon.com/goto/WebAPI/dms-2016-01-01/ModifyEventSubscription func (c *DatabaseMigrationService) ModifyEventSubscription(input *ModifyEventSubscriptionInput) (*ModifyEventSubscriptionOutput, error) { @@ -4799,8 +4807,7 @@ func (c *DatabaseMigrationService) ModifyReplicationInstanceRequest(input *Modif // // Returned Error Types: // * AccessDeniedFault -// AWS DMS was denied access to the endpoint. Check that the role is correctly -// configured. +// DMS was denied access to the endpoint. Check that the role is correctly configured. // // * InvalidResourceStateFault // The resource is in a state that prevents it from being used for database @@ -4898,8 +4905,7 @@ func (c *DatabaseMigrationService) ModifyReplicationSubnetGroupRequest(input *Mo // // Returned Error Types: // * AccessDeniedFault -// AWS DMS was denied access to the endpoint. Check that the role is correctly -// configured. +// DMS was denied access to the endpoint. Check that the role is correctly configured. // // * ResourceNotFoundFault // The resource could not be found. 
@@ -4988,9 +4994,8 @@ func (c *DatabaseMigrationService) ModifyReplicationTaskRequest(input *ModifyRep // You can't modify the task endpoints. The task must be stopped before you // can modify it. // -// For more information about AWS DMS tasks, see Working with Migration Tasks -// (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Tasks.html) in the -// AWS Database Migration Service User Guide. +// For more information about DMS tasks, see Working with Migration Tasks (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Tasks.html) +// in the Database Migration Service User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5011,7 +5016,7 @@ func (c *DatabaseMigrationService) ModifyReplicationTaskRequest(input *ModifyRep // The resource you are attempting to create already exists. // // * KMSKeyNotAccessibleFault -// AWS DMS cannot access the AWS KMS key. +// DMS cannot access the KMS key. // // See also, https://docs.aws.amazon.com/goto/WebAPI/dms-2016-01-01/ModifyReplicationTask func (c *DatabaseMigrationService) ModifyReplicationTask(input *ModifyReplicationTaskInput) (*ModifyReplicationTaskOutput, error) { @@ -5081,7 +5086,7 @@ func (c *DatabaseMigrationService) MoveReplicationTaskRequest(input *MoveReplica // // Moves a replication task from its current replication instance to a different // target replication instance using the specified parameters. The target replication -// instance must be created with the same or later AWS DMS version as the current +// instance must be created with the same or later DMS version as the current // replication instance. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions @@ -5093,8 +5098,7 @@ func (c *DatabaseMigrationService) MoveReplicationTaskRequest(input *MoveReplica // // Returned Error Types: // * AccessDeniedFault -// AWS DMS was denied access to the endpoint. Check that the role is correctly -// configured. +// DMS was denied access to the endpoint. Check that the role is correctly configured. // // * InvalidResourceStateFault // The resource is in a state that prevents it from being used for database @@ -5104,7 +5108,7 @@ func (c *DatabaseMigrationService) MoveReplicationTaskRequest(input *MoveReplica // The resource could not be found. // // * KMSKeyNotAccessibleFault -// AWS DMS cannot access the AWS KMS key. +// DMS cannot access the KMS key. // // See also, https://docs.aws.amazon.com/goto/WebAPI/dms-2016-01-01/MoveReplicationTask func (c *DatabaseMigrationService) MoveReplicationTask(input *MoveReplicationTaskInput) (*MoveReplicationTaskOutput, error) { @@ -5276,7 +5280,7 @@ func (c *DatabaseMigrationService) RefreshSchemasRequest(input *RefreshSchemasIn // The resource could not be found. // // * KMSKeyNotAccessibleFault -// AWS DMS cannot access the AWS KMS key. +// DMS cannot access the KMS key. // // * ResourceQuotaExceededFault // The quota for this resource quota has been exceeded. @@ -5431,7 +5435,7 @@ func (c *DatabaseMigrationService) RemoveTagsFromResourceRequest(input *RemoveTa // RemoveTagsFromResource API operation for AWS Database Migration Service. // -// Removes metadata tags from an AWS DMS resource, including replication instance, +// Removes metadata tags from an DMS resource, including replication instance, // endpoint, security group, and migration task. For more information, see Tag // (https://docs.aws.amazon.com/dms/latest/APIReference/API_Tag.html) data type // description. @@ -5515,9 +5519,8 @@ func (c *DatabaseMigrationService) StartReplicationTaskRequest(input *StartRepli // // Starts the replication task. 
// -// For more information about AWS DMS tasks, see Working with Migration Tasks -// (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Tasks.html) in the -// AWS Database Migration Service User Guide. +// For more information about DMS tasks, see Working with Migration Tasks (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Tasks.html) +// in the Database Migration Service User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5535,8 +5538,7 @@ func (c *DatabaseMigrationService) StartReplicationTaskRequest(input *StartRepli // migration. // // * AccessDeniedFault -// AWS DMS was denied access to the endpoint. Check that the role is correctly -// configured. +// DMS was denied access to the endpoint. Check that the role is correctly configured. // // See also, https://docs.aws.amazon.com/goto/WebAPI/dms-2016-01-01/StartReplicationTask func (c *DatabaseMigrationService) StartReplicationTask(input *StartReplicationTaskInput) (*StartReplicationTaskOutput, error) { @@ -5707,8 +5709,7 @@ func (c *DatabaseMigrationService) StartReplicationTaskAssessmentRunRequest(inpu // // Returned Error Types: // * AccessDeniedFault -// AWS DMS was denied access to the endpoint. Check that the role is correctly -// configured. +// DMS was denied access to the endpoint. Check that the role is correctly configured. // // * ResourceNotFoundFault // The resource could not be found. @@ -5725,17 +5726,16 @@ func (c *DatabaseMigrationService) StartReplicationTaskAssessmentRunRequest(inpu // The specified master key (CMK) isn't enabled. // // * KMSFault -// An AWS Key Management Service (AWS KMS) error is preventing access to AWS -// KMS. +// An Key Management Service (KMS) error is preventing access to KMS. // // * KMSInvalidStateFault -// The state of the specified AWS KMS resource isn't valid for this request. 
+// The state of the specified KMS resource isn't valid for this request. // // * KMSNotFoundFault -// The specified AWS KMS entity or resource can't be found. +// The specified KMS entity or resource can't be found. // // * KMSKeyNotAccessibleFault -// AWS DMS cannot access the AWS KMS key. +// DMS cannot access the KMS key. // // * S3AccessDeniedFault // Insufficient privileges are preventing access to an Amazon S3 object. @@ -5913,14 +5913,13 @@ func (c *DatabaseMigrationService) TestConnectionRequest(input *TestConnectionIn // migration. // // * KMSKeyNotAccessibleFault -// AWS DMS cannot access the AWS KMS key. +// DMS cannot access the KMS key. // // * ResourceQuotaExceededFault // The quota for this resource quota has been exceeded. // // * AccessDeniedFault -// AWS DMS was denied access to the endpoint. Check that the role is correctly -// configured. +// DMS was denied access to the endpoint. Check that the role is correctly configured. // // See also, https://docs.aws.amazon.com/goto/WebAPI/dms-2016-01-01/TestConnection func (c *DatabaseMigrationService) TestConnection(input *TestConnectionInput) (*TestConnectionOutput, error) { @@ -5944,8 +5943,7 @@ func (c *DatabaseMigrationService) TestConnectionWithContext(ctx aws.Context, in return out, req.Send() } -// AWS DMS was denied access to the endpoint. Check that the role is correctly -// configured. +// DMS was denied access to the endpoint. Check that the role is correctly configured. type AccessDeniedFault struct { _ struct{} `type:"structure"` RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` @@ -6001,12 +5999,12 @@ func (s *AccessDeniedFault) RequestID() string { return s.RespMetadata.RequestID } -// Describes a quota for an AWS account, for example, the number of replication -// instances allowed. +// Describes a quota for an account, for example the number of replication instances +// allowed. 
type AccountQuota struct { _ struct{} `type:"structure"` - // The name of the AWS DMS quota for this AWS account. + // The name of the DMS quota for this account. AccountQuotaName *string `type:"string"` // The maximum allowed value for the quota. @@ -6044,14 +6042,14 @@ func (s *AccountQuota) SetUsed(v int64) *AccountQuota { return s } -// Associates a set of tags with an AWS DMS resource. +// Associates a set of tags with an DMS resource. type AddTagsToResourceInput struct { _ struct{} `type:"structure"` - // Identifies the AWS DMS resource to which tags should be added. The value - // for this parameter is an Amazon Resource Name (ARN). + // Identifies the DMS resource to which tags should be added. The value for + // this parameter is an Amazon Resource Name (ARN). // - // For AWS DMS, you can tag a replication instance, an endpoint, or a replication + // For DMS, you can tag a replication instance, an endpoint, or a replication // task. // // ResourceArn is a required field @@ -6138,7 +6136,7 @@ type ApplyPendingMaintenanceActionInput struct { // OptInType is a required field OptInType *string `type:"string" required:"true"` - // The Amazon Resource Name (ARN) of the AWS DMS resource that the pending maintenance + // The Amazon Resource Name (ARN) of the DMS resource that the pending maintenance // action applies to. // // ReplicationInstanceArn is a required field @@ -6195,8 +6193,7 @@ func (s *ApplyPendingMaintenanceActionInput) SetReplicationInstanceArn(v string) type ApplyPendingMaintenanceActionOutput struct { _ struct{} `type:"structure"` - // The AWS DMS resource that the pending maintenance action will be applied - // to. + // The DMS resource that the pending maintenance action will be applied to. 
ResourcePendingMaintenanceActions *ResourcePendingMaintenanceActions `type:"structure"` } @@ -6218,9 +6215,9 @@ func (s *ApplyPendingMaintenanceActionOutput) SetResourcePendingMaintenanceActio // The name of an Availability Zone for use during database migration. AvailabilityZone // is an optional parameter to the CreateReplicationInstance (https://docs.aws.amazon.com/dms/latest/APIReference/API_CreateReplicationInstance.html) -// operation, and it’s value relates to the AWS Region of an endpoint. For -// example, the availability zone of an endpoint in the us-east-1 region might -// be us-east-1a, us-east-1b, us-east-1c, or us-east-1d. +// operation, and it’s value relates to the Region of an endpoint. For example, +// the availability zone of an endpoint in the us-east-1 region might be us-east-1a, +// us-east-1b, us-east-1c, or us-east-1d. type AvailabilityZone struct { _ struct{} `type:"structure"` @@ -6502,7 +6499,8 @@ type CreateEndpointInput struct { // The Amazon Resource Name (ARN) for the certificate. CertificateArn *string `type:"string"` - // The name of the endpoint database. + // The name of the endpoint database. For a MySQL source or target endpoint, + // do not specify DatabaseName. DatabaseName *string `type:"string"` // The settings in JSON format for the DMS transfer type of source endpoint. @@ -6510,18 +6508,14 @@ type CreateEndpointInput struct { // Possible settings include the following: // // * ServiceAccessRoleArn - The IAM role that has permission to access the - // Amazon S3 bucket. + // Amazon S3 bucket. The role must allow the iam:PassRole action. // // * BucketName - The name of the S3 bucket to use. // - // * CompressionType - An optional parameter to use GZIP to compress the - // target files. To use GZIP, set this value to NONE (the default). To keep - // the files uncompressed, don't use this value. 
- // - // Shorthand syntax for these settings is as follows: ServiceAccessRoleArn=string,BucketName=string,CompressionType=string + // Shorthand syntax for these settings is as follows: ServiceAccessRoleArn=string,BucketName=string // // JSON syntax for these settings is as follows: { "ServiceAccessRoleArn": "string", - // "BucketName": "string", "CompressionType": "none"|"gzip" } + // "BucketName": "string", } DmsTransferSettings *DmsTransferSettings `type:"structure"` // Provides information that defines a DocumentDB endpoint. @@ -6529,14 +6523,14 @@ type CreateEndpointInput struct { // Settings in JSON format for the target Amazon DynamoDB endpoint. For information // about other available settings, see Using Object Mapping to Migrate Data - // to DynamoDB (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.DynamoDB.html) - // in the AWS Database Migration Service User Guide. + // to DynamoDB (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.DynamoDB.html#CHAP_Target.DynamoDB.ObjectMapping) + // in the Database Migration Service User Guide. DynamoDbSettings *DynamoDbSettings `type:"structure"` // Settings in JSON format for the target Elasticsearch endpoint. For more information // about the available settings, see Extra Connection Attributes When Using - // Elasticsearch as a Target for AWS DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.Elasticsearch.html#CHAP_Target.Elasticsearch.Configuration) - // in the AWS Database Migration Service User Guide. + // Elasticsearch as a Target for DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.Elasticsearch.html#CHAP_Target.Elasticsearch.Configuration) + // in the Database Migration Service User Guide. ElasticsearchSettings *ElasticsearchSettings `type:"structure"` // The database endpoint identifier. Identifiers must begin with a letter and @@ -6566,72 +6560,71 @@ type CreateEndpointInput struct { // as a name-value pair associated by an equal sign (=). 
Multiple attributes // are separated by a semicolon (;) with no additional white space. For information // on the attributes available for connecting your source or target endpoint, - // see Working with AWS DMS Endpoints (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Endpoints.html) - // in the AWS Database Migration Service User Guide. + // see Working with DMS Endpoints (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Endpoints.html) + // in the Database Migration Service User Guide. ExtraConnectionAttributes *string `type:"string"` // Settings in JSON format for the source IBM Db2 LUW endpoint. For information // about other available settings, see Extra connection attributes when using - // Db2 LUW as a source for AWS DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.DB2.html) - // in the AWS Database Migration Service User Guide. + // Db2 LUW as a source for DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.DB2.html#CHAP_Source.DB2.ConnectionAttrib) + // in the Database Migration Service User Guide. IBMDb2Settings *IBMDb2Settings `type:"structure"` // Settings in JSON format for the target Apache Kafka endpoint. For more information - // about the available settings, see Using Apache Kafka as a Target for AWS - // Database Migration Service (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.Kafka.html) - // in the AWS Database Migration Service User Guide. + // about the available settings, see Using object mapping to migrate data to + // a Kafka topic (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.Kafka.html#CHAP_Target.Kafka.ObjectMapping) + // in the Database Migration Service User Guide. KafkaSettings *KafkaSettings `type:"structure"` // Settings in JSON format for the target endpoint for Amazon Kinesis Data Streams. 
- // For more information about the available settings, see Using Amazon Kinesis - // Data Streams as a Target for AWS Database Migration Service (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.Kinesis.html) - // in the AWS Database Migration Service User Guide. + // For more information about the available settings, see Using object mapping + // to migrate data to a Kinesis data stream (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.Kinesis.html#CHAP_Target.Kinesis.ObjectMapping) + // in the Database Migration Service User Guide. KinesisSettings *KinesisSettings `type:"structure"` - // An AWS KMS key identifier that is used to encrypt the connection parameters - // for the endpoint. + // An KMS key identifier that is used to encrypt the connection parameters for + // the endpoint. // - // If you don't specify a value for the KmsKeyId parameter, then AWS DMS uses - // your default encryption key. + // If you don't specify a value for the KmsKeyId parameter, then DMS uses your + // default encryption key. // - // AWS KMS creates the default encryption key for your AWS account. Your AWS - // account has a different default encryption key for each AWS Region. + // KMS creates the default encryption key for your account. Your account has + // a different default encryption key for each Region. KmsKeyId *string `type:"string"` // Settings in JSON format for the source and target Microsoft SQL Server endpoint. // For information about other available settings, see Extra connection attributes - // when using SQL Server as a source for AWS DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.SQLServer.html) - // and Extra connection attributes when using SQL Server as a target for AWS - // DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.SQLServer.html) - // in the AWS Database Migration Service User Guide. 
+ // when using SQL Server as a source for DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.SQLServer.html#CHAP_Source.SQLServer.ConnectionAttrib) + // and Extra connection attributes when using SQL Server as a target for DMS + // (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.SQLServer.html#CHAP_Target.SQLServer.ConnectionAttrib) + // in the Database Migration Service User Guide. MicrosoftSQLServerSettings *MicrosoftSQLServerSettings `type:"structure"` // Settings in JSON format for the source MongoDB endpoint. For more information - // about the available settings, see Using MongoDB as a Target for AWS Database - // Migration Service (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.MongoDB.html#CHAP_Source.MongoDB.Configuration) - // in the AWS Database Migration Service User Guide. + // about the available settings, see Endpoint configuration settings when using + // MongoDB as a source for Database Migration Service (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.MongoDB.html#CHAP_Source.MongoDB.Configuration) + // in the Database Migration Service User Guide. MongoDbSettings *MongoDbSettings `type:"structure"` // Settings in JSON format for the source and target MySQL endpoint. For information // about other available settings, see Extra connection attributes when using - // MySQL as a source for AWS DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.MySQL.html) + // MySQL as a source for DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.MySQL.html#CHAP_Source.MySQL.ConnectionAttrib) // and Extra connection attributes when using a MySQL-compatible database as - // a target for AWS DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.MySQL.html) - // in the AWS Database Migration Service User Guide. 
+ // a target for DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.MySQL.html#CHAP_Target.MySQL.ConnectionAttrib) + // in the Database Migration Service User Guide. MySQLSettings *MySQLSettings `type:"structure"` // Settings in JSON format for the target Amazon Neptune endpoint. For more - // information about the available settings, see Specifying Endpoint Settings - // for Amazon Neptune as a Target (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.Neptune.html#CHAP_Target.Neptune.EndpointSettings) - // in the AWS Database Migration Service User Guide. + // information about the available settings, see Specifying graph-mapping rules + // using Gremlin and R2RML for Amazon Neptune as a target (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.Neptune.html#CHAP_Target.Neptune.EndpointSettings) + // in the Database Migration Service User Guide. NeptuneSettings *NeptuneSettings `type:"structure"` // Settings in JSON format for the source and target Oracle endpoint. For information // about other available settings, see Extra connection attributes when using - // Oracle as a source for AWS DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.Oracle.html) - // and Extra connection attributes when using Oracle as a target for AWS DMS - // (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.Oracle.html) - // in the AWS Database Migration Service User Guide. + // Oracle as a source for DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.Oracle.html#CHAP_Source.Oracle.ConnectionAttrib) + // and Extra connection attributes when using Oracle as a target for DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.Oracle.html#CHAP_Target.Oracle.ConnectionAttrib) + // in the Database Migration Service User Guide. OracleSettings *OracleSettings `type:"structure"` // The password to be used to log in to the endpoint database. 
@@ -6642,10 +6635,10 @@ type CreateEndpointInput struct { // Settings in JSON format for the source and target PostgreSQL endpoint. For // information about other available settings, see Extra connection attributes - // when using PostgreSQL as a source for AWS DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.PostgreSQL.html) - // and Extra connection attributes when using PostgreSQL as a target for AWS - // DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.PostgreSQL.html) - // in the AWS Database Migration Service User Guide. + // when using PostgreSQL as a source for DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.PostgreSQL.html#CHAP_Source.PostgreSQL.ConnectionAttrib) + // and Extra connection attributes when using PostgreSQL as a target for DMS + // (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.PostgreSQL.html#CHAP_Target.PostgreSQL.ConnectionAttrib) + // in the Database Migration Service User Guide. PostgreSQLSettings *PostgreSQLSettings `type:"structure"` // Provides information that defines an Amazon Redshift endpoint. @@ -6657,21 +6650,21 @@ type CreateEndpointInput struct { // letters, digits, and hyphen ('-'). Also, it can't end with a hyphen or contain // two consecutive hyphens, and can only begin with a letter, such as Example-App-ARN1. // For example, this value might result in the EndpointArn value arn:aws:dms:eu-west-1:012345678901:rep:Example-App-ARN1. - // If you don't specify a ResourceIdentifier value, AWS DMS generates a default + // If you don't specify a ResourceIdentifier value, DMS generates a default // identifier value for the end of EndpointArn. ResourceIdentifier *string `type:"string"` // Settings in JSON format for the target Amazon S3 endpoint. 
For more information // about the available settings, see Extra Connection Attributes When Using - // Amazon S3 as a Target for AWS DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.S3.html#CHAP_Target.S3.Configuring) - // in the AWS Database Migration Service User Guide. + // Amazon S3 as a Target for DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.S3.html#CHAP_Target.S3.Configuring) + // in the Database Migration Service User Guide. S3Settings *S3Settings `type:"structure"` // The name of the server where the endpoint database resides. ServerName *string `type:"string"` // The Amazon Resource Name (ARN) for the service access role that you want - // to use to create the endpoint. + // to use to create the endpoint. The role must allow the iam:PassRole action. ServiceAccessRoleArn *string `type:"string"` // The Secure Sockets Layer (SSL) mode to use for the SSL connection. The default @@ -6680,10 +6673,9 @@ type CreateEndpointInput struct { // Settings in JSON format for the source and target SAP ASE endpoint. For information // about other available settings, see Extra connection attributes when using - // SAP ASE as a source for AWS DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.SAP.html) - // and Extra connection attributes when using SAP ASE as a target for AWS DMS - // (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.SAP.html) in - // the AWS Database Migration Service User Guide. + // SAP ASE as a source for DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.SAP.html#CHAP_Source.SAP.ConnectionAttrib) + // and Extra connection attributes when using SAP ASE as a target for DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.SAP.html#CHAP_Target.SAP.ConnectionAttrib) + // in the Database Migration Service User Guide. SybaseSettings *SybaseSettings `type:"structure"` // One or more tags to be assigned to the endpoint. 
@@ -6961,7 +6953,7 @@ type CreateEventSubscriptionInput struct { // A list of event categories for a source type that you want to subscribe to. // For more information, see Working with Events and Notifications (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Events.html) - // in the AWS Database Migration Service User Guide. + // in the Database Migration Service User Guide. EventCategories []*string `type:"list"` // The Amazon Resource Name (ARN) of the Amazon SNS topic created for event @@ -6971,7 +6963,7 @@ type CreateEventSubscriptionInput struct { // SnsTopicArn is a required field SnsTopicArn *string `type:"string" required:"true"` - // A list of identifiers for which AWS DMS provides notification events. + // A list of identifiers for which DMS provides notification events. // // If you don't specify a value, notifications are provided for all sources. // @@ -6980,16 +6972,16 @@ type CreateEventSubscriptionInput struct { // be database instance IDs. SourceIds []*string `type:"list"` - // The type of AWS DMS resource that generates the events. For example, if you - // want to be notified of events generated by a replication instance, you set - // this parameter to replication-instance. If this value isn't specified, all - // events are returned. + // The type of DMS resource that generates the events. For example, if you want + // to be notified of events generated by a replication instance, you set this + // parameter to replication-instance. If this value isn't specified, all events + // are returned. // // Valid values: replication-instance | replication-task SourceType *string `type:"string"` - // The name of the AWS DMS event notification subscription. This name must be - // less than 255 characters. + // The name of the DMS event notification subscription. This name must be less + // than 255 characters. 
// // SubscriptionName is a required field SubscriptionName *string `type:"string" required:"true"` @@ -7105,7 +7097,7 @@ type CreateReplicationInstanceInput struct { // The Availability Zone where the replication instance will be created. The // default value is a random, system-chosen Availability Zone in the endpoint's - // AWS Region, for example: us-east-1d + // Region, for example: us-east-1d AvailabilityZone *string `type:"string"` // A list of custom DNS name servers supported for the replication instance @@ -7121,14 +7113,14 @@ type CreateReplicationInstanceInput struct { // is created, the default is the latest engine version available. EngineVersion *string `type:"string"` - // An AWS KMS key identifier that is used to encrypt the data on the replication + // An KMS key identifier that is used to encrypt the data on the replication // instance. // - // If you don't specify a value for the KmsKeyId parameter, then AWS DMS uses - // your default encryption key. + // If you don't specify a value for the KmsKeyId parameter, then DMS uses your + // default encryption key. // - // AWS KMS creates the default encryption key for your AWS account. Your AWS - // account has a different default encryption key for each AWS Region. + // KMS creates the default encryption key for your account. Your account has + // a different default encryption key for each Region. KmsKeyId *string `type:"string"` // Specifies whether the replication instance is a Multi-AZ deployment. You @@ -7142,7 +7134,7 @@ type CreateReplicationInstanceInput struct { // Format: ddd:hh24:mi-ddd:hh24:mi // // Default: A 30-minute window selected at random from an 8-hour block of time - // per AWS Region, occurring on a random day of the week. + // per Region, occurring on a random day of the week. // // Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun // @@ -7159,8 +7151,8 @@ type CreateReplicationInstanceInput struct { // class dms.c4.large, set this parameter to "dms.c4.large". 
// // For more information on the settings and capacities for the available replication - // instance classes, see Selecting the right AWS DMS replication instance for - // your migration (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_ReplicationInstance.html#CHAP_ReplicationInstance.InDepth). + // instance classes, see Selecting the right DMS replication instance for your + // migration (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_ReplicationInstance.html#CHAP_ReplicationInstance.InDepth). // // ReplicationInstanceClass is a required field ReplicationInstanceClass *string `type:"string" required:"true"` @@ -7190,7 +7182,7 @@ type CreateReplicationInstanceInput struct { // letters, digits, and hyphen ('-'). Also, it can't end with a hyphen or contain // two consecutive hyphens, and can only begin with a letter, such as Example-App-ARN1. // For example, this value might result in the EndpointArn value arn:aws:dms:eu-west-1:012345678901:rep:Example-App-ARN1. - // If you don't specify a ResourceIdentifier value, AWS DMS generates a default + // If you don't specify a ResourceIdentifier value, DMS generates a default // identifier value for the end of EndpointArn. ResourceIdentifier *string `type:"string"` @@ -7465,7 +7457,7 @@ type CreateReplicationTaskInput struct { // replication slot should already be created and associated with the source // endpoint. You can verify this by setting the slotName extra connection attribute // to the name of this logical replication slot. For more information, see Extra - // Connection Attributes When Using PostgreSQL as a Source for AWS DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.PostgreSQL.html#CHAP_Source.PostgreSQL.ConnectionAttrib). + // Connection Attributes When Using PostgreSQL as a Source for DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.PostgreSQL.html#CHAP_Source.PostgreSQL.ConnectionAttrib). 
CdcStartPosition *string `type:"string"` // Indicates the start time for a change data capture (CDC) operation. Use either @@ -7508,8 +7500,8 @@ type CreateReplicationTaskInput struct { ReplicationTaskIdentifier *string `type:"string" required:"true"` // Overall settings for the task, in JSON format. For more information, see - // Specifying Task Settings for AWS Database Migration Service Tasks (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Tasks.CustomizingTasks.TaskSettings.html) - // in the AWS Database Migration User Guide. + // Specifying Task Settings for Database Migration Service Tasks (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Tasks.CustomizingTasks.TaskSettings.html) + // in the Database Migration Service User Guide. ReplicationTaskSettings *string `type:"string"` // A friendly name for the resource identifier at the end of the EndpointArn @@ -7518,7 +7510,7 @@ type CreateReplicationTaskInput struct { // letters, digits, and hyphen ('-'). Also, it can't end with a hyphen or contain // two consecutive hyphens, and can only begin with a letter, such as Example-App-ARN1. // For example, this value might result in the EndpointArn value arn:aws:dms:eu-west-1:012345678901:rep:Example-App-ARN1. - // If you don't specify a ResourceIdentifier value, AWS DMS generates a default + // If you don't specify a ResourceIdentifier value, DMS generates a default // identifier value for the end of EndpointArn. ResourceIdentifier *string `type:"string"` @@ -7529,7 +7521,7 @@ type CreateReplicationTaskInput struct { // The table mappings for the task, in JSON format. For more information, see // Using Table Mapping to Specify Task Settings (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Tasks.CustomizingTasks.TableMapping.html) - // in the AWS Database Migration Service User Guide. + // in the Database Migration Service User Guide. 
// // TableMappings is a required field TableMappings *string `type:"string" required:"true"` @@ -7545,7 +7537,7 @@ type CreateReplicationTaskInput struct { // Supplemental information that the task requires to migrate the data for certain // source and target endpoints. For more information, see Specifying Supplemental // Data for Task Settings (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Tasks.TaskData.html) - // in the AWS Database Migration Service User Guide. + // in the Database Migration Service User Guide. TaskData *string `type:"string"` } @@ -8201,16 +8193,15 @@ type DescribeAccountAttributesOutput struct { // Account quota information. AccountQuotas []*AccountQuota `type:"list"` - // A unique AWS DMS identifier for an account in a particular AWS Region. The - // value of this identifier has the following format: c99999999999. DMS uses - // this identifier to name artifacts. For example, DMS uses this identifier - // to name the default Amazon S3 bucket for storing task assessment reports - // in a given AWS Region. The format of this S3 bucket name is the following: - // dms-AccountNumber-UniqueAccountIdentifier. Here is an example name for this - // default S3 bucket: dms-111122223333-c44445555666. + // A unique DMS identifier for an account in a particular Region. The value + // of this identifier has the following format: c99999999999. DMS uses this + // identifier to name artifacts. For example, DMS uses this identifier to name + // the default Amazon S3 bucket for storing task assessment reports in a given + // Region. The format of this S3 bucket name is the following: dms-AccountNumber-UniqueAccountIdentifier. + // Here is an example name for this default S3 bucket: dms-111122223333-c44445555666. // - // AWS DMS supports the UniqueAccountIdentifier parameter in versions 3.1.4 - // and later. + // DMS supports the UniqueAccountIdentifier parameter in versions 3.1.4 and + // later. 
UniqueAccountIdentifier *string `type:"string"` } @@ -8330,7 +8321,7 @@ type DescribeApplicableIndividualAssessmentsOutput struct { // For more information on the available individual assessments, including compatibility // with different migration task configurations, see Working with premigration // assessment runs (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Tasks.AssessmentReport.html) - // in the AWS Database Migration Service User Guide. + // in the Database Migration Service User Guide. IndividualAssessmentNames []*string `type:"list"` // Pagination token returned for you to pass to a subsequent request. If you @@ -8878,7 +8869,7 @@ type DescribeEventCategoriesInput struct { // Filters applied to the event categories. Filters []*Filter `type:"list"` - // The type of AWS DMS resource that generates events. + // The type of DMS resource that generates events. // // Valid values: replication-instance | replication-task SourceType *string `type:"string"` @@ -8969,7 +8960,7 @@ type DescribeEventSubscriptionsInput struct { // Constraints: Minimum 20, maximum 100. MaxRecords *int64 `type:"integer"` - // The name of the AWS DMS event subscription to be described. + // The name of the DMS event subscription to be described. SubscriptionName *string `type:"string"` } @@ -9093,7 +9084,7 @@ type DescribeEventsInput struct { // The identifier of an event source. SourceIdentifier *string `type:"string"` - // The type of AWS DMS resource that generates events. + // The type of DMS resource that generates events. // // Valid values: replication-instance | replication-task SourceType *string `type:"string" enum:"SourceType"` @@ -10440,7 +10431,9 @@ type DmsTransferSettings struct { // The name of the S3 bucket to use. BucketName *string `type:"string"` - // The IAM role that has permission to access the Amazon S3 bucket. + // The IAM role that has permission to access the Amazon S3 bucket. 
When specified + // as part of request syntax, such as for the CreateEndpoint and ModifyEndpoint + // actions, the role must allow the iam:PassRole action. ServiceAccessRoleArn *string `type:"string"` } @@ -10484,11 +10477,11 @@ type DocDbSettings struct { // Default value is "false". ExtractDocId *bool `type:"boolean"` - // The AWS KMS key identifier that is used to encrypt the content on the replication - // instance. If you don't specify a value for the KmsKeyId parameter, then AWS - // DMS uses your default encryption key. AWS KMS creates the default encryption - // key for your AWS account. Your AWS account has a different default encryption - // key for each AWS Region. + // The KMS key identifier that is used to encrypt the content on the replication + // instance. If you don't specify a value for the KmsKeyId parameter, then DMS + // uses your default encryption key. KMS creates the default encryption key + // for your account. Your account has a different default encryption key for + // each Region. KmsKeyId *string `type:"string"` // Specifies either document or table mode. @@ -10504,19 +10497,20 @@ type DocDbSettings struct { // The port value for the DocumentDB source endpoint. Port *int64 `type:"integer"` - // The full Amazon Resource Name (ARN) of the IAM role that specifies AWS DMS - // as the trusted entity and grants the required permissions to access the value - // in SecretsManagerSecret. SecretsManagerSecret has the value of the AWS Secrets - // Manager secret that allows access to the DocumentDB endpoint. + // The full Amazon Resource Name (ARN) of the IAM role that specifies DMS as + // the trusted entity and grants the required permissions to access the value + // in SecretsManagerSecret. The role must allow the iam:PassRole action. SecretsManagerSecret + // has the value of the Amazon Web Services Secrets Manager secret that allows + // access to the DocumentDB endpoint. // // You can specify one of two sets of values for these permissions. 
You can // specify the values for this setting and SecretsManagerSecretId. Or you can // specify clear-text values for UserName, Password, ServerName, and Port. You // can't specify both. For more information on creating this SecretsManagerSecret // and the SecretsManagerAccessRoleArn and SecretsManagerSecretId required to - // access it, see Using secrets to access AWS Database Migration Service resources - // (https://docs.aws.amazon.com/https:/docs.aws.amazon.com/dms/latest/userguide/CHAP_Security.html#security-iam-secretsmanager) - // in the AWS Database Migration Service User Guide. + // access it, see Using secrets to access Database Migration Service resources + // (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Security.html#security-iam-secretsmanager) + // in the Database Migration Service User Guide. SecretsManagerAccessRoleArn *string `type:"string"` // The full ARN, partial ARN, or friendly name of the SecretsManagerSecret that @@ -10606,12 +10600,13 @@ func (s *DocDbSettings) SetUsername(v string) *DocDbSettings { return s } -// Provides the Amazon Resource Name (ARN) of the AWS Identity and Access Management +// Provides the Amazon Resource Name (ARN) of the Identity and Access Management // (IAM) role used to define an Amazon DynamoDB target endpoint. type DynamoDbSettings struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) used by the service access IAM role. + // The Amazon Resource Name (ARN) used by the service to access the IAM role. + // The role must allow the iam:PassRole action. // // ServiceAccessRoleArn is a required field ServiceAccessRoleArn *string `type:"string" required:"true"` @@ -10650,7 +10645,7 @@ func (s *DynamoDbSettings) SetServiceAccessRoleArn(v string) *DynamoDbSettings { type ElasticsearchSettings struct { _ struct{} `type:"structure"` - // The endpoint for the Elasticsearch cluster. AWS DMS uses HTTPS if a transport + // The endpoint for the Elasticsearch cluster. 
DMS uses HTTPS if a transport // protocol (http/https) is not specified. // // EndpointUri is a required field @@ -10669,7 +10664,8 @@ type ElasticsearchSettings struct { // fail in the last 10 minutes, the full load operation stops. FullLoadErrorPercentage *int64 `type:"integer"` - // The Amazon Resource Name (ARN) used by service to access the IAM role. + // The Amazon Resource Name (ARN) used by the service to access the IAM role. + // The role must allow the iam:PassRole action. // // ServiceAccessRoleArn is a required field ServiceAccessRoleArn *string `type:"string" required:"true"` @@ -10732,8 +10728,6 @@ func (s *ElasticsearchSettings) SetServiceAccessRoleArn(v string) *Elasticsearch // // * DescribeEndpoint // -// * DescribeEndpointTypes -// // * ModifyEndpoint type Endpoint struct { _ struct{} `type:"structure"` @@ -10749,18 +10743,14 @@ type Endpoint struct { // Possible settings include the following: // // * ServiceAccessRoleArn - The IAM role that has permission to access the - // Amazon S3 bucket. + // Amazon S3 bucket. The role must allow the iam:PassRole action. // // * BucketName - The name of the S3 bucket to use. // - // * CompressionType - An optional parameter to use GZIP to compress the - // target files. To use GZIP, set this value to NONE (the default). To keep - // the files uncompressed, don't use this value. - // - // Shorthand syntax for these settings is as follows: ServiceAccessRoleArn=string,BucketName=string,CompressionType=string + // Shorthand syntax for these settings is as follows: ServiceAccessRoleArn=string,BucketName=string, // // JSON syntax for these settings is as follows: { "ServiceAccessRoleArn": "string", - // "BucketName": "string", "CompressionType": "none"|"gzip" } + // "BucketName": "string"} DmsTransferSettings *DmsTransferSettings `type:"structure"` // Provides information that defines a DocumentDB endpoint. @@ -10818,14 +10808,14 @@ type Endpoint struct { // see the KinesisSettings structure. 
KinesisSettings *KinesisSettings `type:"structure"` - // An AWS KMS key identifier that is used to encrypt the connection parameters - // for the endpoint. + // An KMS key identifier that is used to encrypt the connection parameters for + // the endpoint. // - // If you don't specify a value for the KmsKeyId parameter, then AWS DMS uses - // your default encryption key. + // If you don't specify a value for the KmsKeyId parameter, then DMS uses your + // default encryption key. // - // AWS KMS creates the default encryption key for your AWS account. Your AWS - // account has a different default encryption key for each AWS Region. + // KMS creates the default encryption key for your account. Your account has + // a different default encryption key for each Region. KmsKeyId *string `type:"string"` // The settings for the Microsoft SQL Server source and target endpoint. For @@ -10865,7 +10855,8 @@ type Endpoint struct { // The name of the server at the endpoint. ServerName *string `type:"string"` - // The Amazon Resource Name (ARN) used by the service access IAM role. + // The Amazon Resource Name (ARN) used by the service to access the IAM role. + // The role must allow the iam:PassRole action. ServiceAccessRoleArn *string `type:"string"` // The SSL mode used to connect to the endpoint. The default value is none. @@ -11098,6 +11089,10 @@ type EndpointSetting struct { // endpoint type. Applicability *string `type:"string"` + // The default value of the endpoint setting if no value is specified using + // CreateEndpoint or ModifyEndpoint. + DefaultValue *string `type:"string"` + // Enumerated values to use for this endpoint. EnumValues []*string `type:"list"` @@ -11136,6 +11131,12 @@ func (s *EndpointSetting) SetApplicability(v string) *EndpointSetting { return s } +// SetDefaultValue sets the DefaultValue field's value. 
+func (s *EndpointSetting) SetDefaultValue(v string) *EndpointSetting { + s.DefaultValue = &v + return s +} + // SetEnumValues sets the EnumValues field's value. func (s *EndpointSetting) SetEnumValues(v []*string) *EndpointSetting { s.EnumValues = v @@ -11180,7 +11181,7 @@ func (s *EndpointSetting) SetUnits(v string) *EndpointSetting { // Describes an identifiable significant activity that affects a replication // instance or task. This object can provide the message, the available event -// categories, the date and source of the event, and the AWS DMS resource type. +// categories, the date and source of the event, and the DMS resource type. type Event struct { _ struct{} `type:"structure"` @@ -11196,7 +11197,7 @@ type Event struct { // The identifier of an event source. SourceIdentifier *string `type:"string"` - // The type of AWS DMS resource that generates events. + // The type of DMS resource that generates events. // // Valid values: replication-instance | endpoint | replication-task SourceType *string `type:"string" enum:"SourceType"` @@ -11243,7 +11244,7 @@ func (s *Event) SetSourceType(v string) *Event { } // Lists categories of events subscribed to, and generated by, the applicable -// AWS DMS resource type. This data type appears in response to the DescribeEventCategories +// DMS resource type. This data type appears in response to the DescribeEventCategories // (https://docs.aws.amazon.com/dms/latest/APIReference/API_EventCategoryGroup.html) // action. type EventCategoryGroup struct { @@ -11252,7 +11253,7 @@ type EventCategoryGroup struct { // A list of event categories from a source type that you've chosen. EventCategories []*string `type:"list"` - // The type of AWS DMS resource that generates events. + // The type of DMS resource that generates events. 
// // Valid values: replication-instance | replication-server | security-group // | replication-task @@ -11286,10 +11287,11 @@ func (s *EventCategoryGroup) SetSourceType(v string) *EventCategoryGroup { type EventSubscription struct { _ struct{} `type:"structure"` - // The AWS DMS event notification subscription Id. + // The DMS event notification subscription Id. CustSubscriptionId *string `type:"string"` - // The AWS customer account associated with the AWS DMS event notification subscription. + // The Amazon Web Services customer account associated with the DMS event notification + // subscription. CustomerAwsId *string `type:"string"` // Boolean value that indicates if the event subscription is enabled. @@ -11298,31 +11300,31 @@ type EventSubscription struct { // A lists of event categories. EventCategoriesList []*string `type:"list"` - // The topic ARN of the AWS DMS event notification subscription. + // The topic ARN of the DMS event notification subscription. SnsTopicArn *string `type:"string"` // A list of source Ids for the event subscription. SourceIdsList []*string `type:"list"` - // The type of AWS DMS resource that generates events. + // The type of DMS resource that generates events. // // Valid values: replication-instance | replication-server | security-group // | replication-task SourceType *string `type:"string"` - // The status of the AWS DMS event notification subscription. + // The status of the DMS event notification subscription. // // Constraints: // // Can be one of the following: creating | modifying | deleting | active | no-permission // | topic-not-exist // - // The status "no-permission" indicates that AWS DMS no longer has permission - // to post to the SNS topic. The status "topic-not-exist" indicates that the - // topic was deleted after the subscription was created. + // The status "no-permission" indicates that DMS no longer has permission to + // post to the SNS topic. 
The status "topic-not-exist" indicates that the topic + // was deleted after the subscription was created. Status *string `type:"string"` - // The time the AWS DMS event notification subscription was created. + // The time the DMS event notification subscription was created. SubscriptionCreationTime *string `type:"string"` } @@ -11391,7 +11393,7 @@ func (s *EventSubscription) SetSubscriptionCreationTime(v string) *EventSubscrip } // Identifies the name and value of a filter object. This filter is used to -// limit the number and type of AWS DMS objects that are returned for a particular +// limit the number and type of DMS objects that are returned for a particular // Describe* call or similar operation. Filters are used as an optional parameter // for certain API operations. type Filter struct { @@ -11464,22 +11466,23 @@ type IBMDb2Settings struct { // Endpoint connection password. Password *string `type:"string" sensitive:"true"` - // Endpoint TCP port. + // Endpoint TCP port. The default value is 50000. Port *int64 `type:"integer"` - // The full Amazon Resource Name (ARN) of the IAM role that specifies AWS DMS - // as the trusted entity and grants the required permissions to access the value - // in SecretsManagerSecret. SecretsManagerSecret has the value of the AWS Secrets - // Manager secret that allows access to the Db2 LUW endpoint. + // The full Amazon Resource Name (ARN) of the IAM role that specifies DMS as + // the trusted entity and grants the required permissions to access the value + // in SecretsManagerSecret. The role must allow the iam:PassRole action. SecretsManagerSecret + // has the value of the Amazon Web Services Secrets Manager secret that allows + // access to the Db2 LUW endpoint. // // You can specify one of two sets of values for these permissions. You can // specify the values for this setting and SecretsManagerSecretId. Or you can // specify clear-text values for UserName, Password, ServerName, and Port. You // can't specify both. 
For more information on creating this SecretsManagerSecret // and the SecretsManagerAccessRoleArn and SecretsManagerSecretId required to - // access it, see Using secrets to access AWS Database Migration Service resources - // (https://docs.aws.amazon.com/https:/docs.aws.amazon.com/dms/latest/userguide/CHAP_Security.html#security-iam-secretsmanager) - // in the AWS Database Migration Service User Guide. + // access it, see Using secrets to access Database Migration Service resources + // (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Security.html#security-iam-secretsmanager) + // in the Database Migration Service User Guide. SecretsManagerAccessRoleArn *string `type:"string"` // The full ARN, partial ARN, or friendly name of the SecretsManagerSecret that @@ -11579,7 +11582,9 @@ type ImportCertificateInput struct { // The contents of a .pem file, which contains an X.509 certificate. CertificatePem *string `type:"string" sensitive:"true"` - // The location of an imported Oracle Wallet certificate for use with SSL. + // The location of an imported Oracle Wallet certificate for use with SSL. Provide + // the name of a .sso file using the fileb:// prefix. You can't provide the + // certificate inline. // // CertificateWallet is automatically base64 encoded/decoded by the SDK. CertificateWallet []byte `type:"blob"` @@ -11996,8 +12001,7 @@ func (s *KMSDisabledFault) RequestID() string { return s.RespMetadata.RequestID } -// An AWS Key Management Service (AWS KMS) error is preventing access to AWS -// KMS. +// An Key Management Service (KMS) error is preventing access to KMS. type KMSFault struct { _ struct{} `type:"structure"` RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` @@ -12053,7 +12057,7 @@ func (s *KMSFault) RequestID() string { return s.RespMetadata.RequestID } -// The state of the specified AWS KMS resource isn't valid for this request. +// The state of the specified KMS resource isn't valid for this request. 
type KMSInvalidStateFault struct { _ struct{} `type:"structure"` RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` @@ -12109,7 +12113,7 @@ func (s *KMSInvalidStateFault) RequestID() string { return s.RespMetadata.RequestID } -// AWS DMS cannot access the AWS KMS key. +// DMS cannot access the KMS key. type KMSKeyNotAccessibleFault struct { _ struct{} `type:"structure"` RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` @@ -12165,7 +12169,7 @@ func (s *KMSKeyNotAccessibleFault) RequestID() string { return s.RespMetadata.RequestID } -// The specified AWS KMS entity or resource can't be found. +// The specified KMS entity or resource can't be found. type KMSNotFoundFault struct { _ struct{} `type:"structure"` RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` @@ -12221,7 +12225,7 @@ func (s *KMSNotFoundFault) RequestID() string { return s.RespMetadata.RequestID } -// This request triggered AWS KMS request throttling. +// This request triggered KMS request throttling. type KMSThrottlingFault struct { _ struct{} `type:"structure"` RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` @@ -12287,8 +12291,8 @@ type KafkaSettings struct { // that host your Kafka instance. Specify each broker location in the form broker-hostname-or-ip:port // . For example, "ec2-12-345-678-901.compute-1.amazonaws.com:2345". For more // information and examples of specifying a list of broker locations, see Using - // Apache Kafka as a target for AWS Database Migration Service (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.Kafka.html) - // in the AWS Data Migration Service User Guide. + // Apache Kafka as a target for Database Migration Service (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.Kafka.html) + // in the Database Migration Service User Guide. Broker *string `type:"string"` // Shows detailed control information for table definition, column definition, @@ -12300,7 +12304,7 @@ type KafkaSettings struct { // default is false. 
IncludeNullAndEmpty *bool `type:"boolean"` - // Shows the partition value within the Kafka message output, unless the partition + // Shows the partition value within the Kafka message output unless the partition // type is schema-table-type. The default is false. IncludePartitionValue *bool `type:"boolean"` @@ -12323,6 +12327,11 @@ type KafkaSettings struct { // is 1,000,000. MessageMaxBytes *int64 `type:"integer"` + // If this attribute is Y, it allows hexadecimal values that don't have the + // 0x prefix when migrated to a Kafka target. If this attribute is N, all hexadecimal + // values include this prefix when migrated to Kafka. + NoHexPrefix *bool `type:"boolean"` + // Prefixes schema and table names to partition values, when the partition type // is primary-key-type. Doing this increases data distribution among Kafka partitions. // For example, suppose that a SysBench schema has thousands of tables and each @@ -12336,7 +12345,7 @@ type KafkaSettings struct { // and client using SASL-SSL authentication. SaslPassword *string `type:"string" sensitive:"true"` - // The secure username you created when you first set up your MSK cluster to + // The secure user name you created when you first set up your MSK cluster to // validate a client identity and make an encrypted connection between server // and client using SASL-SSL authentication. SaslUsername *string `type:"string"` @@ -12347,7 +12356,7 @@ type KafkaSettings struct { SecurityProtocol *string `type:"string" enum:"KafkaSecurityProtocol"` // The Amazon Resource Name (ARN) for the private Certification Authority (CA) - // cert that AWS DMS uses to securely connect to your Kafka target endpoint. + // cert that DMS uses to securely connect to your Kafka target endpoint. SslCaCertificateArn *string `type:"string"` // The Amazon Resource Name (ARN) of the client certificate used to securely @@ -12362,8 +12371,8 @@ type KafkaSettings struct { // target endpoint. 
SslClientKeyPassword *string `type:"string" sensitive:"true"` - // The topic to which you migrate the data. If you don't specify a topic, AWS - // DMS specifies "kafka-default-topic" as the migration topic. + // The topic to which you migrate the data. If you don't specify a topic, DMS + // specifies "kafka-default-topic" as the migration topic. Topic *string `type:"string"` } @@ -12425,6 +12434,12 @@ func (s *KafkaSettings) SetMessageMaxBytes(v int64) *KafkaSettings { return s } +// SetNoHexPrefix sets the NoHexPrefix field's value. +func (s *KafkaSettings) SetNoHexPrefix(v bool) *KafkaSettings { + s.NoHexPrefix = &v + return s +} + // SetPartitionIncludeSchemaTable sets the PartitionIncludeSchemaTable field's value. func (s *KafkaSettings) SetPartitionIncludeSchemaTable(v bool) *KafkaSettings { s.PartitionIncludeSchemaTable = &v @@ -12513,6 +12528,11 @@ type KinesisSettings struct { // is JSON (default) or JSON_UNFORMATTED (a single line with no tab). MessageFormat *string `type:"string" enum:"MessageFormatValue"` + // If this attribute is Y, it allows hexadecimal values that don't have the + // 0x prefix when migrated to a Kinesis target. If this attribute is N, all + // hexadecimal values include this prefix when migrated to Kinesis. + NoHexPrefix *bool `type:"boolean"` + // Prefixes schema and table names to partition values, when the partition type // is primary-key-type. Doing this increases data distribution among Kinesis // shards. For example, suppose that a SysBench schema has thousands of tables @@ -12521,8 +12541,8 @@ type KinesisSettings struct { // causes throttling. The default is false. PartitionIncludeSchemaTable *bool `type:"boolean"` - // The Amazon Resource Name (ARN) for the AWS Identity and Access Management - // (IAM) role that AWS DMS uses to write to the Kinesis data stream. + // The Amazon Resource Name (ARN) for the IAM role that DMS uses to write to + // the Kinesis data stream. The role must allow the iam:PassRole action. 
ServiceAccessRoleArn *string `type:"string"` // The Amazon Resource Name (ARN) for the Amazon Kinesis Data Streams endpoint. @@ -12575,6 +12595,12 @@ func (s *KinesisSettings) SetMessageFormat(v string) *KinesisSettings { return s } +// SetNoHexPrefix sets the NoHexPrefix field's value. +func (s *KinesisSettings) SetNoHexPrefix(v bool) *KinesisSettings { + s.NoHexPrefix = &v + return s +} + // SetPartitionIncludeSchemaTable sets the PartitionIncludeSchemaTable field's value. func (s *KinesisSettings) SetPartitionIncludeSchemaTable(v bool) *KinesisSettings { s.PartitionIncludeSchemaTable = &v @@ -12596,8 +12622,7 @@ func (s *KinesisSettings) SetStreamArn(v string) *KinesisSettings { type ListTagsForResourceInput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) string that uniquely identifies the AWS DMS - // resource. + // The Amazon Resource Name (ARN) string that uniquely identifies the DMS resource. // // ResourceArn is a required field ResourceArn *string `type:"string" required:"true"` @@ -12662,8 +12687,8 @@ type MicrosoftSQLServerSettings struct { // The maximum size of the packets (in bytes) used to transfer data using BCP. BcpPacketSize *int64 `type:"integer"` - // Specifies a file group for the AWS DMS internal tables. When the replication - // task starts, all the internal AWS DMS control tables (awsdms_ apply_exception, + // Specifies a file group for the DMS internal tables. When the replication + // task starts, all the internal DMS control tables (awsdms_ apply_exception, // awsdms_apply, awsdms_changes) are created for the specified file group. ControlTablesFileGroup *string `type:"string"` @@ -12682,7 +12707,7 @@ type MicrosoftSQLServerSettings struct { // table cached in the replication instance. 
QuerySingleAlwaysOnNode *bool `type:"boolean"` - // When this attribute is set to Y, AWS DMS only reads changes from transaction + // When this attribute is set to Y, DMS only reads changes from transaction // log backups and doesn't read from the active transaction log file during // ongoing replication. Setting this parameter to Y enables you to control active // transaction log file growth during full load and ongoing replication tasks. @@ -12690,36 +12715,37 @@ type MicrosoftSQLServerSettings struct { ReadBackupOnly *bool `type:"boolean"` // Use this attribute to minimize the need to access the backup log and enable - // AWS DMS to prevent truncation using one of the following two methods. + // DMS to prevent truncation using one of the following two methods. // // Start transactions in the database: This is the default method. When this - // method is used, AWS DMS prevents TLOG truncation by mimicking a transaction - // in the database. As long as such a transaction is open, changes that appear + // method is used, DMS prevents TLOG truncation by mimicking a transaction in + // the database. As long as such a transaction is open, changes that appear // after the transaction started aren't truncated. If you need Microsoft Replication // to be enabled in your database, then you must choose this method. // // Exclusively use sp_repldone within a single task: When this method is used, - // AWS DMS reads the changes and then uses sp_repldone to mark the TLOG transactions + // DMS reads the changes and then uses sp_repldone to mark the TLOG transactions // as ready for truncation. Although this method doesn't involve any transactional // activities, it can only be used when Microsoft Replication isn't running. - // Also, when using this method, only one AWS DMS task can access the database - // at any given time. 
Therefore, if you need to run parallel AWS DMS tasks against + // Also, when using this method, only one DMS task can access the database at + // any given time. Therefore, if you need to run parallel DMS tasks against // the same database, use the default method. SafeguardPolicy *string `type:"string" enum:"SafeguardPolicy"` - // The full Amazon Resource Name (ARN) of the IAM role that specifies AWS DMS - // as the trusted entity and grants the required permissions to access the value - // in SecretsManagerSecret. SecretsManagerSecret has the value of the AWS Secrets - // Manager secret that allows access to the SQL Server endpoint. + // The full Amazon Resource Name (ARN) of the IAM role that specifies DMS as + // the trusted entity and grants the required permissions to access the value + // in SecretsManagerSecret. The role must allow the iam:PassRole action. SecretsManagerSecret + // has the value of the Amazon Web Services Secrets Manager secret that allows + // access to the SQL Server endpoint. // // You can specify one of two sets of values for these permissions. You can // specify the values for this setting and SecretsManagerSecretId. Or you can // specify clear-text values for UserName, Password, ServerName, and Port. You // can't specify both. For more information on creating this SecretsManagerSecret // and the SecretsManagerAccessRoleArn and SecretsManagerSecretId required to - // access it, see Using secrets to access AWS Database Migration Service resources - // (https://docs.aws.amazon.com/https:/docs.aws.amazon.com/dms/latest/userguide/CHAP_Security.html#security-iam-secretsmanager) - // in the AWS Database Migration Service User Guide. + // access it, see Using secrets to access Database Migration Service resources + // (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Security.html#security-iam-secretsmanager) + // in the Database Migration Service User Guide. 
SecretsManagerAccessRoleArn *string `type:"string"` // The full ARN, partial ARN, or friendly name of the SecretsManagerSecret that @@ -12842,45 +12868,43 @@ type ModifyEndpointInput struct { // The Amazon Resource Name (ARN) of the certificate used for SSL connection. CertificateArn *string `type:"string"` - // The name of the endpoint database. + // The name of the endpoint database. For a MySQL source or target endpoint, + // do not specify DatabaseName. DatabaseName *string `type:"string"` // The settings in JSON format for the DMS transfer type of source endpoint. // // Attributes include the following: // - // * serviceAccessRoleArn - The AWS Identity and Access Management (IAM) - // role that has permission to access the Amazon S3 bucket. + // * serviceAccessRoleArn - The Identity and Access Management (IAM) role + // that has permission to access the Amazon S3 bucket. The role must allow + // the iam:PassRole action. // // * BucketName - The name of the S3 bucket to use. // - // * compressionType - An optional parameter to use GZIP to compress the - // target files. Either set this parameter to NONE (the default) or don't - // use it to leave the files uncompressed. - // // Shorthand syntax for these settings is as follows: ServiceAccessRoleArn=string - // ,BucketName=string,CompressionType=string + // ,BucketName=string // // JSON syntax for these settings is as follows: { "ServiceAccessRoleArn": "string", - // "BucketName": "string", "CompressionType": "none"|"gzip" } + // "BucketName": "string"} DmsTransferSettings *DmsTransferSettings `type:"structure"` // Settings in JSON format for the source DocumentDB endpoint. For more information // about the available settings, see the configuration properties section in - // Using DocumentDB as a Target for AWS Database Migration Service (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.DocumentDB.html) - // in the AWS Database Migration Service User Guide. 
+ // Using DocumentDB as a Target for Database Migration Service (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.DocumentDB.html) + // in the Database Migration Service User Guide. DocDbSettings *DocDbSettings `type:"structure"` // Settings in JSON format for the target Amazon DynamoDB endpoint. For information // about other available settings, see Using Object Mapping to Migrate Data - // to DynamoDB (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.DynamoDB.html) - // in the AWS Database Migration Service User Guide. + // to DynamoDB (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.DynamoDB.html#CHAP_Target.DynamoDB.ObjectMapping) + // in the Database Migration Service User Guide. DynamoDbSettings *DynamoDbSettings `type:"structure"` // Settings in JSON format for the target Elasticsearch endpoint. For more information // about the available settings, see Extra Connection Attributes When Using - // Elasticsearch as a Target for AWS DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.Elasticsearch.html#CHAP_Target.Elasticsearch.Configuration) - // in the AWS Database Migration Service User Guide. + // Elasticsearch as a Target for DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.Elasticsearch.html#CHAP_Target.Elasticsearch.Configuration) + // in the Database Migration Service User Guide. ElasticsearchSettings *ElasticsearchSettings `type:"structure"` // The Amazon Resource Name (ARN) string that uniquely identifies the endpoint. @@ -12902,6 +12926,27 @@ type ModifyEndpointInput struct { // "kafka", "elasticsearch", "documentdb", "sqlserver", and "neptune". EngineName *string `type:"string"` + // If this attribute is Y, the current call to ModifyEndpoint replaces all existing + // endpoint settings with the exact settings that you specify in this call. 
+ // If this attribute is N, the current call to ModifyEndpoint does two things: + // + // * It replaces any endpoint settings that already exist with new values, + // for settings with the same names. + // + // * It creates new endpoint settings that you specify in the call, for settings + // with different names. + // + // For example, if you call create-endpoint ... --endpoint-settings '{"a":1}' + // ..., the endpoint has the following endpoint settings: '{"a":1}'. If you + // then call modify-endpoint ... --endpoint-settings '{"b":2}' ... for the same + // endpoint, the endpoint has the following settings: '{"a":1,"b":2}'. + // + // However, suppose that you follow this with a call to modify-endpoint ... + // --endpoint-settings '{"b":2}' --exact-settings ... for that same endpoint + // again. Then the endpoint has the following settings: '{"b":2}'. All existing + // settings are replaced with the exact settings that you specify. + ExactSettings *bool `type:"boolean"` + // The external table definition. ExternalTableDefinition *string `type:"string"` @@ -12911,56 +12956,56 @@ type ModifyEndpointInput struct { // Settings in JSON format for the source IBM Db2 LUW endpoint. For information // about other available settings, see Extra connection attributes when using - // Db2 LUW as a source for AWS DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.DB2.ConnectionAttrib) - // in the AWS Database Migration Service User Guide. + // Db2 LUW as a source for DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.DB2.html#CHAP_Source.DB2.ConnectionAttrib) + // in the Database Migration Service User Guide. IBMDb2Settings *IBMDb2Settings `type:"structure"` // Settings in JSON format for the target Apache Kafka endpoint. 
For more information - // about the available settings, see Using Apache Kafka as a Target for AWS - // Database Migration Service (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.Kafka.html) - // in the AWS Database Migration Service User Guide. + // about the available settings, see Using object mapping to migrate data to + // a Kafka topic (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.Kafka.html#CHAP_Target.Kafka.ObjectMapping) + // in the Database Migration Service User Guide. KafkaSettings *KafkaSettings `type:"structure"` // Settings in JSON format for the target endpoint for Amazon Kinesis Data Streams. - // For more information about the available settings, see Using Amazon Kinesis - // Data Streams as a Target for AWS Database Migration Service (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.Kinesis.html) - // in the AWS Database Migration Service User Guide. + // For more information about the available settings, see Using object mapping + // to migrate data to a Kinesis data stream (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.Kinesis.html#CHAP_Target.Kinesis.ObjectMapping) + // in the Database Migration Service User Guide. KinesisSettings *KinesisSettings `type:"structure"` // Settings in JSON format for the source and target Microsoft SQL Server endpoint. // For information about other available settings, see Extra connection attributes - // when using SQL Server as a source for AWS DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.SQLServer.ConnectionAttrib) - // and Extra connection attributes when using SQL Server as a target for AWS - // DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.SQLServer.ConnectionAttrib) - // in the AWS Database Migration Service User Guide. 
+ // when using SQL Server as a source for DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.SQLServer.html#CHAP_Source.SQLServer.ConnectionAttrib) + // and Extra connection attributes when using SQL Server as a target for DMS + // (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.SQLServer.html#CHAP_Target.SQLServer.ConnectionAttrib) + // in the Database Migration Service User Guide. MicrosoftSQLServerSettings *MicrosoftSQLServerSettings `type:"structure"` // Settings in JSON format for the source MongoDB endpoint. For more information // about the available settings, see the configuration properties section in - // Using MongoDB as a Target for AWS Database Migration Service (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.MongoDB.html) - // in the AWS Database Migration Service User Guide. + // Endpoint configuration settings when using MongoDB as a source for Database + // Migration Service (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.MongoDB.html#CHAP_Source.MongoDB.Configuration) + // in the Database Migration Service User Guide. MongoDbSettings *MongoDbSettings `type:"structure"` // Settings in JSON format for the source and target MySQL endpoint. For information // about other available settings, see Extra connection attributes when using - // MySQL as a source for AWS DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.MySQL.ConnectionAttrib) + // MySQL as a source for DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.MySQL.html#CHAP_Source.MySQL.ConnectionAttrib) // and Extra connection attributes when using a MySQL-compatible database as - // a target for AWS DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.MySQL.ConnectionAttrib) - // in the AWS Database Migration Service User Guide. 
+ // a target for DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.MySQL.html#CHAP_Target.MySQL.ConnectionAttrib) + // in the Database Migration Service User Guide. MySQLSettings *MySQLSettings `type:"structure"` // Settings in JSON format for the target Amazon Neptune endpoint. For more - // information about the available settings, see Specifying Endpoint Settings - // for Amazon Neptune as a Target (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.Neptune.html#CHAP_Target.Neptune.EndpointSettings) - // in the AWS Database Migration Service User Guide. + // information about the available settings, see Specifying graph-mapping rules + // using Gremlin and R2RML for Amazon Neptune as a target (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.Neptune.html#CHAP_Target.Neptune.EndpointSettings) + // in the Database Migration Service User Guide. NeptuneSettings *NeptuneSettings `type:"structure"` // Settings in JSON format for the source and target Oracle endpoint. For information // about other available settings, see Extra connection attributes when using - // Oracle as a source for AWS DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.Oracle.ConnectionAttrib) - // and Extra connection attributes when using Oracle as a target for AWS DMS - // (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.Oracle.ConnectionAttrib) - // in the AWS Database Migration Service User Guide. + // Oracle as a source for DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.Oracle.html#CHAP_Source.Oracle.ConnectionAttrib) + // and Extra connection attributes when using Oracle as a target for DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.Oracle.html#CHAP_Target.Oracle.ConnectionAttrib) + // in the Database Migration Service User Guide. OracleSettings *OracleSettings `type:"structure"` // The password to be used to login to the endpoint database. 
@@ -12971,10 +13016,10 @@ type ModifyEndpointInput struct { // Settings in JSON format for the source and target PostgreSQL endpoint. For // information about other available settings, see Extra connection attributes - // when using PostgreSQL as a source for AWS DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.PostgreSQL.ConnectionAttrib) - // and Extra connection attributes when using PostgreSQL as a target for AWS - // DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.PostgreSQL.ConnectionAttrib) - // in the AWS Database Migration Service User Guide. + // when using PostgreSQL as a source for DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.PostgreSQL.html#CHAP_Source.PostgreSQL.ConnectionAttrib) + // and Extra connection attributes when using PostgreSQL as a target for DMS + // (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.PostgreSQL.html#CHAP_Target.PostgreSQL.ConnectionAttrib) + // in the Database Migration Service User Guide. PostgreSQLSettings *PostgreSQLSettings `type:"structure"` // Provides information that defines an Amazon Redshift endpoint. @@ -12982,15 +13027,15 @@ type ModifyEndpointInput struct { // Settings in JSON format for the target Amazon S3 endpoint. For more information // about the available settings, see Extra Connection Attributes When Using - // Amazon S3 as a Target for AWS DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.S3.html#CHAP_Target.S3.Configuring) - // in the AWS Database Migration Service User Guide. + // Amazon S3 as a Target for DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.S3.html#CHAP_Target.S3.Configuring) + // in the Database Migration Service User Guide. S3Settings *S3Settings `type:"structure"` // The name of the server where the endpoint database resides. ServerName *string `type:"string"` - // The Amazon Resource Name (ARN) for the service access role you want to use - // to modify the endpoint. 
+ // The Amazon Resource Name (ARN) for the IAM role you want to use to modify + // the endpoint. The role must allow the iam:PassRole action. ServiceAccessRoleArn *string `type:"string"` // The SSL mode used to connect to the endpoint. The default value is none. @@ -12998,10 +13043,9 @@ type ModifyEndpointInput struct { // Settings in JSON format for the source and target SAP ASE endpoint. For information // about other available settings, see Extra connection attributes when using - // SAP ASE as a source for AWS DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.SAP.ConnectionAttrib) - // and Extra connection attributes when using SAP ASE as a target for AWS DMS - // (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.SAP.ConnectionAttrib) - // in the AWS Database Migration Service User Guide. + // SAP ASE as a source for DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.SAP.html#CHAP_Source.SAP.ConnectionAttrib) + // and Extra connection attributes when using SAP ASE as a target for DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.SAP.html#CHAP_Target.SAP.ConnectionAttrib) + // in the Database Migration Service User Guide. SybaseSettings *SybaseSettings `type:"structure"` // The user name to be used to login to the endpoint database. @@ -13106,6 +13150,12 @@ func (s *ModifyEndpointInput) SetEngineName(v string) *ModifyEndpointInput { return s } +// SetExactSettings sets the ExactSettings field's value. +func (s *ModifyEndpointInput) SetExactSettings(v bool) *ModifyEndpointInput { + s.ExactSettings = &v + return s +} + // SetExternalTableDefinition sets the ExternalTableDefinition field's value. func (s *ModifyEndpointInput) SetExternalTableDefinition(v string) *ModifyEndpointInput { s.ExternalTableDefinition = &v @@ -13264,13 +13314,13 @@ type ModifyEventSubscriptionInput struct { // subscribe to it. 
SnsTopicArn *string `type:"string"` - // The type of AWS DMS resource that generates the events you want to subscribe + // The type of DMS resource that generates the events you want to subscribe // to. // // Valid values: replication-instance | replication-task SourceType *string `type:"string"` - // The name of the AWS DMS event notification subscription to be modified. + // The name of the DMS event notification subscription to be modified. // // SubscriptionName is a required field SubscriptionName *string `type:"string" required:"true"` @@ -13383,7 +13433,7 @@ type ModifyReplicationInstanceInput struct { // // * A newer minor version is available. // - // * AWS DMS has enabled automatic patching for the given engine version. + // * DMS has enabled automatic patching for the given engine version. AutoMinorVersionUpgrade *bool `type:"boolean"` // The engine version number of the replication instance. @@ -13423,8 +13473,8 @@ type ModifyReplicationInstanceInput struct { // class dms.c4.large, set this parameter to "dms.c4.large". // // For more information on the settings and capacities for the available replication - // instance classes, see Selecting the right AWS DMS replication instance for - // your migration (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_ReplicationInstance.html#CHAP_ReplicationInstance.InDepth). + // instance classes, see Selecting the right DMS replication instance for your + // migration (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_ReplicationInstance.html#CHAP_ReplicationInstance.InDepth). ReplicationInstanceClass *string `type:"string"` // The replication instance identifier. This parameter is stored as a lowercase @@ -13652,7 +13702,7 @@ type ModifyReplicationTaskInput struct { // replication slot should already be created and associated with the source // endpoint. You can verify this by setting the slotName extra connection attribute // to the name of this logical replication slot. 
For more information, see Extra - // Connection Attributes When Using PostgreSQL as a Source for AWS DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.PostgreSQL.html#CHAP_Source.PostgreSQL.ConnectionAttrib). + // Connection Attributes When Using PostgreSQL as a Source for DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.PostgreSQL.html#CHAP_Source.PostgreSQL.ConnectionAttrib). CdcStartPosition *string `type:"string"` // Indicates the start time for a change data capture (CDC) operation. Use either @@ -13693,7 +13743,7 @@ type ModifyReplicationTaskInput struct { // JSON file that contains settings for the task, such as task metadata settings. ReplicationTaskSettings *string `type:"string"` - // When using the AWS CLI or boto3, provide the path of the JSON file that contains + // When using the CLI or boto3, provide the path of the JSON file that contains // the table mappings. Precede the path with file://. For example, --table-mappings // file://mappingfile.json. When working with the DMS API, provide the JSON // as the parameter value. @@ -13702,7 +13752,7 @@ type ModifyReplicationTaskInput struct { // Supplemental information that the task requires to migrate the data for certain // source and target endpoints. For more information, see Specifying Supplemental // Data for Task Settings (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Tasks.TaskData.html) - // in the AWS Database Migration Service User Guide. + // in the Database Migration Service User Guide. TaskData *string `type:"string"` } @@ -13843,11 +13893,11 @@ type MongoDbSettings struct { // Default value is "false". ExtractDocId *string `type:"string"` - // The AWS KMS key identifier that is used to encrypt the content on the replication - // instance. If you don't specify a value for the KmsKeyId parameter, then AWS - // DMS uses your default encryption key. AWS KMS creates the default encryption - // key for your AWS account. 
Your AWS account has a different default encryption - // key for each AWS Region. + // The KMS key identifier that is used to encrypt the content on the replication + // instance. If you don't specify a value for the KmsKeyId parameter, then DMS + // uses your default encryption key. KMS creates the default encryption key + // for your account. Your account has a different default encryption key for + // each Region. KmsKeyId *string `type:"string"` // Specifies either document or table mode. @@ -13862,19 +13912,20 @@ type MongoDbSettings struct { // The port value for the MongoDB source endpoint. Port *int64 `type:"integer"` - // The full Amazon Resource Name (ARN) of the IAM role that specifies AWS DMS - // as the trusted entity and grants the required permissions to access the value - // in SecretsManagerSecret. SecretsManagerSecret has the value of the AWS Secrets - // Manager secret that allows access to the MongoDB endpoint. + // The full Amazon Resource Name (ARN) of the IAM role that specifies DMS as + // the trusted entity and grants the required permissions to access the value + // in SecretsManagerSecret. The role must allow the iam:PassRole action. SecretsManagerSecret + // has the value of the Amazon Web Services Secrets Manager secret that allows + // access to the MongoDB endpoint. // // You can specify one of two sets of values for these permissions. You can // specify the values for this setting and SecretsManagerSecretId. Or you can // specify clear-text values for UserName, Password, ServerName, and Port. You // can't specify both. For more information on creating this SecretsManagerSecret // and the SecretsManagerAccessRoleArn and SecretsManagerSecretId required to - // access it, see Using secrets to access AWS Database Migration Service resources - // (https://docs.aws.amazon.com/https:/docs.aws.amazon.com/dms/latest/userguide/CHAP_Security.html#security-iam-secretsmanager) - // in the AWS Database Migration Service User Guide. 
+ // access it, see Using secrets to access Database Migration Service resources + // (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Security.html#security-iam-secretsmanager) + // in the Database Migration Service User Guide. SecretsManagerAccessRoleArn *string `type:"string"` // The full ARN, partial ARN, or friendly name of the SecretsManagerSecret that @@ -14061,9 +14112,12 @@ func (s *MoveReplicationTaskOutput) SetReplicationTask(v *ReplicationTask) *Move type MySQLSettings struct { _ struct{} `type:"structure"` - // Specifies a script to run immediately after AWS DMS connects to the endpoint. + // Specifies a script to run immediately after DMS connects to the endpoint. // The migration task continues running regardless if the SQL statement succeeds // or fails. + // + // For this parameter, provide the code of the script itself, not the name of + // a file containing the script. AfterConnectScript *string `type:"string"` // Adjusts the behavior of DMS when migrating from an SQL Server source database @@ -14072,7 +14126,12 @@ type MySQLSettings struct { // set this attribute to false. CleanSourceMetadataOnMismatch *bool `type:"boolean"` - // Database name for the endpoint. + // Database name for the endpoint. For a MySQL source or target endpoint, don't + // explicitly specify the database using the DatabaseName request parameter + // on either the CreateEndpoint or ModifyEndpoint API call. Specifying DatabaseName + // when you create or modify a MySQL endpoint replicates all the task tables + // to this single database. For MySQL endpoints, you specify the database only + // when you specify the schema in the table-mapping rules of the DMS task. DatabaseName *string `type:"string"` // Specifies how often to check the binary log for new changes/events when the @@ -14080,8 +14139,7 @@ type MySQLSettings struct { // // Example: eventsPollInterval=5; // - // In the example, AWS DMS checks for changes in the binary logs every five - // seconds. 
+ // In the example, DMS checks for changes in the binary logs every five seconds. EventsPollInterval *int64 `type:"integer"` // Specifies the maximum size (in KB) of any .csv file used to transfer data @@ -14105,19 +14163,20 @@ type MySQLSettings struct { // Endpoint TCP port. Port *int64 `type:"integer"` - // The full Amazon Resource Name (ARN) of the IAM role that specifies AWS DMS - // as the trusted entity and grants the required permissions to access the value - // in SecretsManagerSecret. SecretsManagerSecret has the value of the AWS Secrets - // Manager secret that allows access to the MySQL endpoint. + // The full Amazon Resource Name (ARN) of the IAM role that specifies DMS as + // the trusted entity and grants the required permissions to access the value + // in SecretsManagerSecret. The role must allow the iam:PassRole action. SecretsManagerSecret + // has the value of the Amazon Web Services Secrets Manager secret that allows + // access to the MySQL endpoint. // // You can specify one of two sets of values for these permissions. You can // specify the values for this setting and SecretsManagerSecretId. Or you can // specify clear-text values for UserName, Password, ServerName, and Port. You // can't specify both. For more information on creating this SecretsManagerSecret // and the SecretsManagerAccessRoleArn and SecretsManagerSecretId required to - // access it, see Using secrets to access AWS Database Migration Service resources - // (https://docs.aws.amazon.com/https:/docs.aws.amazon.com/dms/latest/userguide/CHAP_Security.html#security-iam-secretsmanager) - // in the AWS Database Migration Service User Guide. + // access it, see Using secrets to access Database Migration Service resources + // (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Security.html#security-iam-secretsmanager) + // in the Database Migration Service User Guide. 
SecretsManagerAccessRoleArn *string `type:"string"` // The full ARN, partial ARN, or friendly name of the SecretsManagerSecret that @@ -14242,45 +14301,46 @@ func (s *MySQLSettings) SetUsername(v string) *MySQLSettings { type NeptuneSettings struct { _ struct{} `type:"structure"` - // The number of milliseconds for AWS DMS to wait to retry a bulk-load of migrated + // The number of milliseconds for DMS to wait to retry a bulk-load of migrated // graph data to the Neptune target database before raising an error. The default // is 250. ErrorRetryDuration *int64 `type:"integer"` - // If you want AWS Identity and Access Management (IAM) authorization enabled - // for this endpoint, set this parameter to true. Then attach the appropriate - // IAM policy document to your service role specified by ServiceAccessRoleArn. - // The default is false. + // If you want Identity and Access Management (IAM) authorization enabled for + // this endpoint, set this parameter to true. Then attach the appropriate IAM + // policy document to your service role specified by ServiceAccessRoleArn. The + // default is false. IamAuthEnabled *bool `type:"boolean"` // The maximum size in kilobytes of migrated graph data stored in a .csv file - // before AWS DMS bulk-loads the data to the Neptune target database. The default - // is 1,048,576 KB. If the bulk load is successful, AWS DMS clears the bucket, - // ready to store the next batch of migrated graph data. + // before DMS bulk-loads the data to the Neptune target database. The default + // is 1,048,576 KB. If the bulk load is successful, DMS clears the bucket, ready + // to store the next batch of migrated graph data. MaxFileSize *int64 `type:"integer"` - // The number of times for AWS DMS to retry a bulk load of migrated graph data - // to the Neptune target database before raising an error. The default is 5. 
+ // The number of times for DMS to retry a bulk load of migrated graph data to + // the Neptune target database before raising an error. The default is 5. MaxRetryCount *int64 `type:"integer"` - // A folder path where you want AWS DMS to store migrated graph data in the - // S3 bucket specified by S3BucketName + // A folder path where you want DMS to store migrated graph data in the S3 bucket + // specified by S3BucketName // // S3BucketFolder is a required field S3BucketFolder *string `type:"string" required:"true"` - // The name of the Amazon S3 bucket where AWS DMS can temporarily store migrated + // The name of the Amazon S3 bucket where DMS can temporarily store migrated // graph data in .csv files before bulk-loading it to the Neptune target database. - // AWS DMS maps the SQL source data to graph data before storing it in these - // .csv files. + // DMS maps the SQL source data to graph data before storing it in these .csv + // files. // // S3BucketName is a required field S3BucketName *string `type:"string" required:"true"` // The Amazon Resource Name (ARN) of the service role that you created for the - // Neptune target endpoint. For more information, see Creating an IAM Service - // Role for Accessing Amazon Neptune as a Target (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.Neptune.html#CHAP_Target.Neptune.ServiceRole) - // in the AWS Database Migration Service User Guide. + // Neptune target endpoint. The role must allow the iam:PassRole action. For + // more information, see Creating an IAM Service Role for Accessing Amazon Neptune + // as a Target (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.Neptune.html#CHAP_Target.Neptune.ServiceRole) + // in the Database Migration Service User Guide. ServiceAccessRoleArn *string `type:"string"` } @@ -14370,27 +14430,32 @@ type OracleSettings struct { // logging. AddSupplementalLogging *bool `type:"boolean"` - // Set this attribute with archivedLogDestId in a primary/ standby setup. 
This - // attribute is useful in the case of a switchover. In this case, AWS DMS needs + // Set this attribute with ArchivedLogDestId in a primary/ standby setup. This + // attribute is useful in the case of a switchover. In this case, DMS needs // to know which destination to get archive redo logs from to read changes. // This need arises because the previous primary instance is now a standby instance // after switchover. + // + // Although DMS supports the use of the Oracle RESETLOGS option to open the + // database, never use RESETLOGS unless necessary. For additional information + // about RESETLOGS, see RMAN Data Repair Concepts (https://docs.oracle.com/en/database/oracle/oracle-database/19/bradv/rman-data-repair-concepts.html#GUID-1805CCF7-4AF2-482D-B65A-998192F89C2B) + // in the Oracle Database Backup and Recovery User's Guide. AdditionalArchivedLogDestId *int64 `type:"integer"` // Set this attribute to true to enable replication of Oracle tables containing // columns that are nested tables or defined types. AllowSelectNestedTables *bool `type:"boolean"` - // Specifies the destination of the archived redo logs. The value should be - // the same as the DEST_ID number in the v$archived_log table. When working - // with multiple log destinations (DEST_ID), we recommend that you to specify - // an archived redo logs location identifier. Doing this improves performance + // Specifies the ID of the destination for the archived redo logs. This value + // should be the same as a number in the dest_id column of the v$archived_log + // view. If you work with an additional redo log destination, use the AdditionalArchivedLogDestId + // option to specify the additional destination ID. Doing this improves performance // by ensuring that the correct logs are accessed from the outset. ArchivedLogDestId *int64 `type:"integer"` - // When this field is set to Y, AWS DMS only accesses the archived redo logs. 
- // If the archived redo logs are stored on Oracle ASM only, the AWS DMS user - // account needs to be granted ASM privileges. + // When this field is set to Y, DMS only accesses the archived redo logs. If + // the archived redo logs are stored on Oracle ASM only, the DMS user account + // needs to be granted ASM privileges. ArchivedLogsOnly *bool `type:"boolean"` // For an Oracle source endpoint, your Oracle Automatic Storage Management (ASM) @@ -14431,7 +14496,7 @@ type OracleSettings struct { DirectPathNoLog *bool `type:"boolean"` // When set to true, this attribute specifies a parallel load when useDirectPathFullLoad - // is set to Y. This attribute also only applies when you use the AWS DMS parallel + // is set to Y. This attribute also only applies when you use the DMS parallel // load feature. Note that the target table cannot have any constraints or indexes. DirectPathParallelLoad *bool `type:"boolean"` @@ -14459,7 +14524,7 @@ type OracleSettings struct { OraclePathPrefix *string `type:"string"` // Set this attribute to change the number of threads that DMS configures to - // perform a Change Data Capture (CDC) load using Oracle Automatic Storage Management + // perform a change data capture (CDC) load using Oracle Automatic Storage Management // (ASM). You can specify an integer value between 2 (the default) and 8 (the // maximum). Use this attribute together with the readAheadBlocks attribute. ParallelAsmReadThreads *int64 `type:"integer"` @@ -14471,7 +14536,7 @@ type OracleSettings struct { Port *int64 `type:"integer"` // Set this attribute to change the number of read-ahead blocks that DMS configures - // to perform a Change Data Capture (CDC) load using Oracle Automatic Storage + // to perform a change data capture (CDC) load using Oracle Automatic Storage // Management (ASM). You can specify an integer value between 1000 (the default) // and 200,000 (the maximum). 
ReadAheadBlocks *int64 `type:"integer"` @@ -14491,24 +14556,25 @@ type OracleSettings struct { // Example: retryInterval=6; RetryInterval *int64 `type:"integer"` - // The full Amazon Resource Name (ARN) of the IAM role that specifies AWS DMS - // as the trusted entity and grants the required permissions to access the value - // in SecretsManagerSecret. SecretsManagerSecret has the value of the AWS Secrets - // Manager secret that allows access to the Oracle endpoint. + // The full Amazon Resource Name (ARN) of the IAM role that specifies DMS as + // the trusted entity and grants the required permissions to access the value + // in SecretsManagerSecret. The role must allow the iam:PassRole action. SecretsManagerSecret + // has the value of the Amazon Web Services Secrets Manager secret that allows + // access to the Oracle endpoint. // // You can specify one of two sets of values for these permissions. You can // specify the values for this setting and SecretsManagerSecretId. Or you can // specify clear-text values for UserName, Password, ServerName, and Port. You // can't specify both. For more information on creating this SecretsManagerSecret // and the SecretsManagerAccessRoleArn and SecretsManagerSecretId required to - // access it, see Using secrets to access AWS Database Migration Service resources - // (https://docs.aws.amazon.com/https:/docs.aws.amazon.com/dms/latest/userguide/CHAP_Security.html#security-iam-secretsmanager) - // in the AWS Database Migration Service User Guide. + // access it, see Using secrets to access Database Migration Service resources + // (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Security.html#security-iam-secretsmanager) + // in the Database Migration Service User Guide. SecretsManagerAccessRoleArn *string `type:"string"` // Required only if your Oracle endpoint uses Advanced Storage Manager (ASM). 
- // The full ARN of the IAM role that specifies AWS DMS as the trusted entity - // and grants the required permissions to access the SecretsManagerOracleAsmSecret. + // The full ARN of the IAM role that specifies DMS as the trusted entity and + // grants the required permissions to access the SecretsManagerOracleAsmSecret. // This SecretsManagerOracleAsmSecret has the secret value that allows access // to the Oracle ASM of the endpoint. // @@ -14517,9 +14583,9 @@ type OracleSettings struct { // Or you can specify clear-text values for AsmUserName, AsmPassword, and AsmServerName. // You can't specify both. For more information on creating this SecretsManagerOracleAsmSecret // and the SecretsManagerOracleAsmAccessRoleArn and SecretsManagerOracleAsmSecretId - // required to access it, see Using secrets to access AWS Database Migration - // Service resources (https://docs.aws.amazon.com/https:/docs.aws.amazon.com/dms/latest/userguide/CHAP_Security.html#security-iam-secretsmanager) - // in the AWS Database Migration Service User Guide. + // required to access it, see Using secrets to access Database Migration Service + // resources (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Security.html#security-iam-secretsmanager) + // in the Database Migration Service User Guide. SecretsManagerOracleAsmAccessRoleArn *string `type:"string"` // Required only if your Oracle endpoint uses Advanced Storage Manager (ASM). @@ -14536,9 +14602,8 @@ type OracleSettings struct { // Reader. It is also the TDE_Password part of the comma-separated value you // set to the Password request parameter when you create the endpoint. The SecurityDbEncryptian // setting is related to this SecurityDbEncryptionName setting. For more information, - // see Supported encryption methods for using Oracle as a source for AWS DMS - // (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.Oracle.html#CHAP_Source.Oracle.Encryption) - // in the AWS Database Migration Service User Guide. 
+ // see Supported encryption methods for using Oracle as a source for DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.Oracle.html#CHAP_Source.Oracle.Encryption) + // in the Database Migration Service User Guide. SecurityDbEncryption *string `type:"string" sensitive:"true"` // For an Oracle source endpoint, the name of a key used for the transparent @@ -14547,8 +14612,8 @@ type OracleSettings struct { // setting. For more information on setting the key name value of SecurityDbEncryptionName, // see the information and example for setting the securityDbEncryptionName // extra connection attribute in Supported encryption methods for using Oracle - // as a source for AWS DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.Oracle.html#CHAP_Source.Oracle.Encryption) - // in the AWS Database Migration Service User Guide. + // as a source for DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.Oracle.html#CHAP_Source.Oracle.Encryption) + // in the Database Migration Service User Guide. SecurityDbEncryptionName *string `type:"string"` // Fully qualified domain name of the endpoint. @@ -14560,11 +14625,42 @@ type OracleSettings struct { // and set SpatialDataOptionToGeoJsonFunctionName to call it instead. SpatialDataOptionToGeoJsonFunctionName *string `type:"string"` + // Use this attribute to specify a time in minutes for the delay in standby + // sync. If the source is an Oracle Active Data Guard standby database, use + // this attribute to specify the time lag between primary and standby databases. + // + // In DMS, you can create an Oracle CDC task that uses an Active Data Guard + // standby instance as a source for replicating ongoing changes. Doing this + // eliminates the need to connect to an active database that might be in production. + StandbyDelayTime *int64 `type:"integer"` + // Set this attribute to true in order to use the Binary Reader to capture change // data for an Amazon RDS for Oracle as the source. 
This tells the DMS instance // to use any specified prefix replacement to access all online redo logs. UseAlternateFolderForOnline *bool `type:"boolean"` + // Set this attribute to Y to capture change data using the Binary Reader utility. + // Set UseLogminerReader to N to set this attribute to Y. To use Binary Reader + // with Amazon RDS for Oracle as the source, you set additional attributes. + // For more information about using this setting with Oracle Automatic Storage + // Management (ASM), see Using Oracle LogMiner or DMS Binary Reader for CDC + // (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.Oracle.html#CHAP_Source.Oracle.CDC). + UseBFile *bool `type:"boolean"` + + // Set this attribute to Y to have DMS use a direct path full load. Specify + // this value to use the direct path protocol in the Oracle Call Interface (OCI). + // By using this OCI protocol, you can bulk-load Oracle target tables during + // a full load. + UseDirectPathFullLoad *bool `type:"boolean"` + + // Set this attribute to Y to capture change data using the Oracle LogMiner + // utility (the default). Set this attribute to N if you want to access the + // redo logs as a binary file. When you set UseLogminerReader to N, also set + // UseBfile to Y. For more information on this setting and using Oracle ASM, + // see Using Oracle LogMiner or DMS Binary Reader for CDC (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.Oracle.html#CHAP_Source.Oracle.CDC) + // in the DMS User Guide. + UseLogminerReader *bool `type:"boolean"` + // Set this string attribute to the required value in order to use the Binary // Reader to capture change data for an Amazon RDS for Oracle as the source. // This value specifies the path prefix used to replace the default Oracle root @@ -14777,12 +14873,36 @@ func (s *OracleSettings) SetSpatialDataOptionToGeoJsonFunctionName(v string) *Or return s } +// SetStandbyDelayTime sets the StandbyDelayTime field's value. 
+func (s *OracleSettings) SetStandbyDelayTime(v int64) *OracleSettings { + s.StandbyDelayTime = &v + return s +} + // SetUseAlternateFolderForOnline sets the UseAlternateFolderForOnline field's value. func (s *OracleSettings) SetUseAlternateFolderForOnline(v bool) *OracleSettings { s.UseAlternateFolderForOnline = &v return s } +// SetUseBFile sets the UseBFile field's value. +func (s *OracleSettings) SetUseBFile(v bool) *OracleSettings { + s.UseBFile = &v + return s +} + +// SetUseDirectPathFullLoad sets the UseDirectPathFullLoad field's value. +func (s *OracleSettings) SetUseDirectPathFullLoad(v bool) *OracleSettings { + s.UseDirectPathFullLoad = &v + return s +} + +// SetUseLogminerReader sets the UseLogminerReader field's value. +func (s *OracleSettings) SetUseLogminerReader(v bool) *OracleSettings { + s.UseLogminerReader = &v + return s +} + // SetUsePathPrefix sets the UsePathPrefix field's value. func (s *OracleSettings) SetUsePathPrefix(v string) *OracleSettings { s.UsePathPrefix = &v @@ -14826,7 +14946,7 @@ type OrderableReplicationInstance struct { // The value returned when the specified EngineVersion of the replication instance // is in Beta or test mode. This indicates some features might not work as expected. // - // AWS DMS supports the ReleaseStatus parameter in versions 3.1.4 and later. + // DMS supports the ReleaseStatus parameter in versions 3.1.4 and later. ReleaseStatus *string `type:"string" enum:"ReleaseStatusValues"` // The compute and memory capacity of the replication instance as defined for @@ -14834,8 +14954,8 @@ type OrderableReplicationInstance struct { // class dms.c4.large, set this parameter to "dms.c4.large". // // For more information on the settings and capacities for the available replication - // instance classes, see Selecting the right AWS DMS replication instance for - // your migration (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_ReplicationInstance.html#CHAP_ReplicationInstance.InDepth). 
+ // instance classes, see Selecting the right DMS replication instance for your + // migration (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_ReplicationInstance.html#CHAP_ReplicationInstance.InDepth). ReplicationInstanceClass *string `type:"string"` // The type of storage used by the replication instance. @@ -14906,9 +15026,9 @@ func (s *OrderableReplicationInstance) SetStorageType(v string) *OrderableReplic return s } -// Describes a maintenance action pending for an AWS DMS resource, including -// when and how it will be applied. This data type is a response element to -// the DescribePendingMaintenanceActions operation. +// Describes a maintenance action pending for an DMS resource, including when +// and how it will be applied. This data type is a response element to the DescribePendingMaintenanceActions +// operation. type PendingMaintenanceAction struct { _ struct{} `type:"structure"` @@ -14992,14 +15112,14 @@ func (s *PendingMaintenanceAction) SetOptInStatus(v string) *PendingMaintenanceA type PostgreSQLSettings struct { _ struct{} `type:"structure"` - // For use with change data capture (CDC) only, this attribute has AWS DMS bypass + // For use with change data capture (CDC) only, this attribute has DMS bypass // foreign keys and user triggers to reduce the time it takes to bulk load data. // // Example: afterConnectScript=SET session_replication_role='replica' AfterConnectScript *string `type:"string"` - // To capture DDL events, AWS DMS creates various artifacts in the PostgreSQL - // database when the task starts. You can later remove these artifacts. + // To capture DDL events, DMS creates various artifacts in the PostgreSQL database + // when the task starts. You can later remove these artifacts. // // If this value is set to N, you don't have to create tables or triggers on // the source database. @@ -15026,6 +15146,18 @@ type PostgreSQLSettings struct { // fails instead of truncating the LOB data. 
FailTasksOnLobTruncation *bool `type:"boolean"` + // If this attribute is set to true, the write-ahead log (WAL) heartbeat keeps + // restart_lsn moving and prevents storage full scenarios. The WAL heartbeat + // mimics a dummy transaction, so that idle logical replication slots don't + // hold onto old WAL logs that result in storage full situations on the source. + HeartbeatEnable *bool `type:"boolean"` + + // Sets the WAL heartbeat frequency (in minutes). + HeartbeatFrequency *int64 `type:"integer"` + + // Sets the schema in which the heartbeat artifacts are created. + HeartbeatSchema *string `type:"string"` + // Specifies the maximum size (in KB) of any .csv file used to transfer data // to PostgreSQL. // @@ -15035,22 +15167,26 @@ type PostgreSQLSettings struct { // Endpoint connection password. Password *string `type:"string" sensitive:"true"` + // Specifies the plugin to use to create a replication slot. + PluginName *string `type:"string" enum:"PluginNameValue"` + // Endpoint TCP port. Port *int64 `type:"integer"` - // The full Amazon Resource Name (ARN) of the IAM role that specifies AWS DMS - // as the trusted entity and grants the required permissions to access the value - // in SecretsManagerSecret. SecretsManagerSecret has the value of the AWS Secrets - // Manager secret that allows access to the PostgreSQL endpoint. + // The full Amazon Resource Name (ARN) of the IAM role that specifies DMS as + // the trusted entity and grants the required permissions to access the value + // in SecretsManagerSecret. The role must allow the iam:PassRole action. SecretsManagerSecret + // has the value of the Amazon Web Services Secrets Manager secret that allows + // access to the PostgreSQL endpoint. // // You can specify one of two sets of values for these permissions. You can // specify the values for this setting and SecretsManagerSecretId. Or you can // specify clear-text values for UserName, Password, ServerName, and Port. You // can't specify both. 
For more information on creating this SecretsManagerSecret // and the SecretsManagerAccessRoleArn and SecretsManagerSecretId required to - // access it, see Using secrets to access AWS Database Migration Service resources - // (https://docs.aws.amazon.com/https:/docs.aws.amazon.com/dms/latest/userguide/CHAP_Security.html#security-iam-secretsmanager) - // in the AWS Database Migration Service User Guide. + // access it, see Using secrets to access Database Migration Service resources + // (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Security.html#security-iam-secretsmanager) + // in the Database Migration Service User Guide. SecretsManagerAccessRoleArn *string `type:"string"` // The full ARN, partial ARN, or friendly name of the SecretsManagerSecret that @@ -15063,7 +15199,7 @@ type PostgreSQLSettings struct { // Sets the name of a previously created logical replication slot for a CDC // load of the PostgreSQL source instance. // - // When used with the AWS DMS API CdcStartPosition request parameter, this attribute + // When used with the DMS API CdcStartPosition request parameter, this attribute // also enables using native CDC start points. SlotName *string `type:"string"` @@ -15117,6 +15253,24 @@ func (s *PostgreSQLSettings) SetFailTasksOnLobTruncation(v bool) *PostgreSQLSett return s } +// SetHeartbeatEnable sets the HeartbeatEnable field's value. +func (s *PostgreSQLSettings) SetHeartbeatEnable(v bool) *PostgreSQLSettings { + s.HeartbeatEnable = &v + return s +} + +// SetHeartbeatFrequency sets the HeartbeatFrequency field's value. +func (s *PostgreSQLSettings) SetHeartbeatFrequency(v int64) *PostgreSQLSettings { + s.HeartbeatFrequency = &v + return s +} + +// SetHeartbeatSchema sets the HeartbeatSchema field's value. +func (s *PostgreSQLSettings) SetHeartbeatSchema(v string) *PostgreSQLSettings { + s.HeartbeatSchema = &v + return s +} + // SetMaxFileSize sets the MaxFileSize field's value. 
func (s *PostgreSQLSettings) SetMaxFileSize(v int64) *PostgreSQLSettings { s.MaxFileSize = &v @@ -15129,6 +15283,12 @@ func (s *PostgreSQLSettings) SetPassword(v string) *PostgreSQLSettings { return s } +// SetPluginName sets the PluginName field's value. +func (s *PostgreSQLSettings) SetPluginName(v string) *PostgreSQLSettings { + s.PluginName = &v + return s +} + // SetPort sets the Port field's value. func (s *PostgreSQLSettings) SetPort(v int64) *PostgreSQLSettings { s.Port = &v @@ -15257,14 +15417,14 @@ type RedshiftSettings struct { // An S3 folder where the comma-separated-value (.csv) files are stored before // being uploaded to the target Redshift cluster. // - // For full load mode, AWS DMS converts source records into .csv files and loads - // them to the BucketFolder/TableID path. AWS DMS uses the Redshift COPY command + // For full load mode, DMS converts source records into .csv files and loads + // them to the BucketFolder/TableID path. DMS uses the Redshift COPY command // to upload the .csv files to the target table. The files are deleted once // the COPY operation has finished. For more information, see COPY (https://docs.aws.amazon.com/redshift/latest/dg/r_COPY.html) // in the Amazon Redshift Database Developer Guide. // - // For change-data-capture (CDC) mode, AWS DMS creates a NetChanges table, and - // loads the .csv files to this BucketFolder/NetChangesTableID path. + // For change-data-capture (CDC) mode, DMS creates a NetChanges table, and loads + // the .csv files to this BucketFolder/NetChangesTableID path. BucketFolder *string `type:"string"` // The name of the intermediate S3 bucket used to store .csv files before uploading @@ -15300,7 +15460,7 @@ type RedshiftSettings struct { // to auto. DateFormat *string `type:"string"` - // A value that specifies whether AWS DMS should migrate empty CHAR and VARCHAR + // A value that specifies whether DMS should migrate empty CHAR and VARCHAR // fields as NULL. 
A value of true sets empty CHAR and VARCHAR fields to null. // The default is false. EmptyAsNull *bool `type:"boolean"` @@ -15313,8 +15473,8 @@ type RedshiftSettings struct { // EncryptionMode parameter from SSE_KMS to SSE_S3. But you can’t change the // existing value from SSE_S3 to SSE_KMS. // - // To use SSE_S3, create an AWS Identity and Access Management (IAM) role with - // a policy that allows "arn:aws:s3:::*" to use the following actions: "s3:PutObject", + // To use SSE_S3, create an Identity and Access Management (IAM) role with a + // policy that allows "arn:aws:s3:::*" to use the following actions: "s3:PutObject", // "s3:ListBucket" EncryptionMode *string `type:"string" enum:"EncryptionModeValue"` @@ -15336,8 +15496,8 @@ type RedshiftSettings struct { FileTransferUploadStreams *int64 `type:"integer"` // The amount of time to wait (in milliseconds) before timing out of operations - // performed by AWS DMS on a Redshift cluster, such as Redshift COPY, INSERT, - // DELETE, and UPDATE. + // performed by DMS on a Redshift cluster, such as Redshift COPY, INSERT, DELETE, + // and UPDATE. LoadTimeout *int64 `type:"integer"` // The maximum size (in KB) of any .csv file used to load data on an S3 bucket @@ -15363,19 +15523,20 @@ type RedshiftSettings struct { // A list of characters that you want to replace. Use with ReplaceChars. ReplaceInvalidChars *string `type:"string"` - // The full Amazon Resource Name (ARN) of the IAM role that specifies AWS DMS - // as the trusted entity and grants the required permissions to access the value - // in SecretsManagerSecret. SecretsManagerSecret has the value of the AWS Secrets - // Manager secret that allows access to the Amazon Redshift endpoint. + // The full Amazon Resource Name (ARN) of the IAM role that specifies DMS as + // the trusted entity and grants the required permissions to access the value + // in SecretsManagerSecret. The role must allow the iam:PassRole action. 
SecretsManagerSecret + // has the value of the Amazon Web Services Secrets Manager secret that allows + // access to the Amazon Redshift endpoint. // // You can specify one of two sets of values for these permissions. You can // specify the values for this setting and SecretsManagerSecretId. Or you can // specify clear-text values for UserName, Password, ServerName, and Port. You // can't specify both. For more information on creating this SecretsManagerSecret // and the SecretsManagerAccessRoleArn and SecretsManagerSecretId required to - // access it, see Using secrets to access AWS Database Migration Service resources - // (https://docs.aws.amazon.com/https:/docs.aws.amazon.com/dms/latest/userguide/CHAP_Security.html#security-iam-secretsmanager) - // in the AWS Database Migration Service User Guide. + // access it, see Using secrets to access Database Migration Service resources + // (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Security.html#security-iam-secretsmanager) + // in the Database Migration Service User Guide. SecretsManagerAccessRoleArn *string `type:"string"` // The full ARN, partial ARN, or friendly name of the SecretsManagerSecret that @@ -15385,13 +15546,13 @@ type RedshiftSettings struct { // The name of the Amazon Redshift cluster you are using. ServerName *string `type:"string"` - // The AWS KMS key ID. If you are using SSE_KMS for the EncryptionMode, provide + // The KMS key ID. If you are using SSE_KMS for the EncryptionMode, provide // this key ID. The key that you use needs an attached policy that enables IAM // user permissions and allows use of the key. ServerSideEncryptionKmsKeyId *string `type:"string"` // The Amazon Resource Name (ARN) of the IAM role that has access to the Amazon - // Redshift service. + // Redshift service. The role must allow the iam:PassRole action. ServiceAccessRoleArn *string `type:"string"` // The time format that you want to use. 
Valid values are auto (case-sensitive), @@ -15849,11 +16010,11 @@ func (s *ReloadTablesOutput) SetReplicationTaskArn(v string) *ReloadTablesOutput return s } -// Removes one or more tags from an AWS DMS resource. +// Removes one or more tags from an DMS resource. type RemoveTagsFromResourceInput struct { _ struct{} `type:"structure"` - // An AWS DMS resource from which you want to remove tag(s). The value for this + // An DMS resource from which you want to remove tag(s). The value for this // parameter is an Amazon Resource Name (ARN). // // ResourceArn is a required field @@ -15952,14 +16113,14 @@ type ReplicationInstance struct { // The time the replication instance was created. InstanceCreateTime *time.Time `type:"timestamp"` - // An AWS KMS key identifier that is used to encrypt the data on the replication + // An KMS key identifier that is used to encrypt the data on the replication // instance. // - // If you don't specify a value for the KmsKeyId parameter, then AWS DMS uses - // your default encryption key. + // If you don't specify a value for the KmsKeyId parameter, then DMS uses your + // default encryption key. // - // AWS KMS creates the default encryption key for your AWS account. Your AWS - // account has a different default encryption key for each AWS Region. + // KMS creates the default encryption key for your account. Your account has + // a different default encryption key for each Region. KmsKeyId *string `type:"string"` // Specifies whether the replication instance is a Multi-AZ deployment. You @@ -15984,11 +16145,11 @@ type ReplicationInstance struct { // The compute and memory capacity of the replication instance as defined for // the specified replication instance class. It is a required parameter, although - // a defualt value is pre-selected in the DMS console. + // a default value is pre-selected in the DMS console. 
// // For more information on the settings and capacities for the available replication - // instance classes, see Selecting the right AWS DMS replication instance for - // your migration (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_ReplicationInstance.html#CHAP_ReplicationInstance.InDepth). + // instance classes, see Selecting the right DMS replication instance for your + // migration (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_ReplicationInstance.html#CHAP_ReplicationInstance.InDepth). ReplicationInstanceClass *string `type:"string"` // The replication instance identifier is a required parameter. This parameter @@ -16272,8 +16433,8 @@ type ReplicationPendingModifiedValues struct { // the specified replication instance class. // // For more information on the settings and capacities for the available replication - // instance classes, see Selecting the right AWS DMS replication instance for - // your migration (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_ReplicationInstance.html#CHAP_ReplicationInstance.InDepth). + // instance classes, see Selecting the right DMS replication instance for your + // migration (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_ReplicationInstance.html#CHAP_ReplicationInstance.InDepth). ReplicationInstanceClass *string `type:"string"` } @@ -16594,7 +16755,7 @@ type ReplicationTask struct { // Supplemental information that the task requires to migrate the data for certain // source and target endpoints. For more information, see Specifying Supplemental // Data for Task Settings (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Tasks.TaskData.html) - // in the AWS Database Migration Service User Guide. + // in the Database Migration Service User Guide. TaskData *string `type:"string"` } @@ -16727,6 +16888,9 @@ type ReplicationTaskAssessmentResult struct { _ struct{} `type:"structure"` // The task assessment results in JSON format. 
+ // + // The response object only contains this field if you provide DescribeReplicationTaskAssessmentResultsMessage$ReplicationTaskArn + // in the request. AssessmentResults *string `type:"string"` // The file containing the results of the task assessment. @@ -16746,6 +16910,9 @@ type ReplicationTaskAssessmentResult struct { ReplicationTaskLastAssessmentDate *time.Time `type:"timestamp"` // The URL of the S3 object containing the task assessment results. + // + // The response object only contains this field if you provide DescribeReplicationTaskAssessmentResultsMessage$ReplicationTaskArn + // in the request. S3ObjectUrl *string `type:"string"` } @@ -16832,18 +16999,18 @@ type ReplicationTaskAssessmentRun struct { // Encryption mode used to encrypt the assessment run results. ResultEncryptionMode *string `type:"string"` - // ARN of the AWS KMS encryption key used to encrypt the assessment run results. + // ARN of the KMS encryption key used to encrypt the assessment run results. ResultKmsKeyArn *string `type:"string"` - // Amazon S3 bucket where AWS DMS stores the results of this assessment run. + // Amazon S3 bucket where DMS stores the results of this assessment run. ResultLocationBucket *string `type:"string"` - // Folder in an Amazon S3 bucket where AWS DMS stores the results of this assessment + // Folder in an Amazon S3 bucket where DMS stores the results of this assessment // run. ResultLocationFolder *string `type:"string"` // ARN of the service role used to start the assessment run using the StartReplicationTaskAssessmentRun - // operation. + // operation. The role must allow the iam:PassRole action. ServiceAccessRoleArn *string `type:"string"` // Assessment run status. @@ -17304,7 +17471,7 @@ func (s *ResourceNotFoundFault) RequestID() string { return s.RespMetadata.RequestID } -// Identifies an AWS DMS resource and any pending actions for it. +// Identifies an DMS resource and any pending actions for it. 
type ResourcePendingMaintenanceActions struct { _ struct{} `type:"structure"` @@ -17313,7 +17480,7 @@ type ResourcePendingMaintenanceActions struct { // The Amazon Resource Name (ARN) of the DMS resource that the pending maintenance // action applies to. For information about creating an ARN, see Constructing - // an Amazon Resource Name (ARN) for AWS DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Introduction.AWS.ARN.html) + // an Amazon Resource Name (ARN) for DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Introduction.AWS.ARN.html) // in the DMS documentation. ResourceIdentifier *string `type:"string"` } @@ -17534,10 +17701,10 @@ type S3Settings struct { // or UPDATE operations at the source. For more information about how these // settings work together, see Indicating Source DB Operations in Migrated S3 // Data (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.S3.html#CHAP_Target.S3.Configuring.InsertOps) - // in the AWS Database Migration Service User Guide.. + // in the Database Migration Service User Guide.. // - // AWS DMS supports the use of the CdcInsertsAndUpdates parameter in versions - // 3.3.1 and later. + // DMS supports the use of the CdcInsertsAndUpdates parameter in versions 3.3.1 + // and later. // // CdcInsertsOnly and CdcInsertsAndUpdates can't both be set to true for the // same endpoint. Set either CdcInsertsOnly or CdcInsertsAndUpdates to true @@ -17559,9 +17726,9 @@ type S3Settings struct { // every CDC record is written without a first field to indicate the INSERT // operation at the source. For more information about how these settings work // together, see Indicating Source DB Operations in Migrated S3 Data (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.S3.html#CHAP_Target.S3.Configuring.InsertOps) - // in the AWS Database Migration Service User Guide.. + // in the Database Migration Service User Guide.. 
// - // AWS DMS supports the interaction described preceding between the CdcInsertsOnly + // DMS supports the interaction described preceding between the CdcInsertsOnly // and IncludeOpForFullLoad parameters in versions 3.1.4 and later. // // CdcInsertsOnly and CdcInsertsAndUpdates can't both be set to true for the @@ -17571,28 +17738,28 @@ type S3Settings struct { // Specifies the folder path of CDC files. For an S3 source, this setting is // required if a task captures change data; otherwise, it's optional. If CdcPath - // is set, AWS DMS reads CDC files from this path and replicates the data changes + // is set, DMS reads CDC files from this path and replicates the data changes // to the target endpoint. For an S3 target if you set PreserveTransactions // (https://docs.aws.amazon.com/dms/latest/APIReference/API_S3Settings.html#DMS-Type-S3Settings-PreserveTransactions) - // to true, AWS DMS verifies that you have set this parameter to a folder path - // on your S3 target where AWS DMS can save the transaction order for the CDC - // load. AWS DMS creates this CDC folder path in either your S3 target working - // directory or the S3 target location specified by BucketFolder (https://docs.aws.amazon.com/dms/latest/APIReference/API_S3Settings.html#DMS-Type-S3Settings-BucketFolder) + // to true, DMS verifies that you have set this parameter to a folder path on + // your S3 target where DMS can save the transaction order for the CDC load. + // DMS creates this CDC folder path in either your S3 target working directory + // or the S3 target location specified by BucketFolder (https://docs.aws.amazon.com/dms/latest/APIReference/API_S3Settings.html#DMS-Type-S3Settings-BucketFolder) // and BucketName (https://docs.aws.amazon.com/dms/latest/APIReference/API_S3Settings.html#DMS-Type-S3Settings-BucketName). 
// // For example, if you specify CdcPath as MyChangedData, and you specify BucketName - // as MyTargetBucket but do not specify BucketFolder, AWS DMS creates the CDC - // folder path following: MyTargetBucket/MyChangedData. + // as MyTargetBucket but do not specify BucketFolder, DMS creates the CDC folder + // path following: MyTargetBucket/MyChangedData. // // If you specify the same CdcPath, and you specify BucketName as MyTargetBucket - // and BucketFolder as MyTargetData, AWS DMS creates the CDC folder path following: + // and BucketFolder as MyTargetData, DMS creates the CDC folder path following: // MyTargetBucket/MyTargetData/MyChangedData. // // For more information on CDC including transaction order on an S3 target, // see Capturing data changes (CDC) including transaction order on the S3 target // (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.S3.html#CHAP_Target.S3.EndpointSettings.CdcPath). // - // This setting is supported in AWS DMS versions 3.4.2 and later. + // This setting is supported in DMS versions 3.4.2 and later. CdcPath *string `type:"string"` // An optional parameter to use GZIP to compress the target files. Set to GZIP @@ -17607,12 +17774,12 @@ type S3Settings struct { // This setting only applies if your Amazon S3 output files during a change // data capture (CDC) load are written in .csv format. If UseCsvNoSupValue (https://docs.aws.amazon.com/dms/latest/APIReference/API_S3Settings.html#DMS-Type-S3Settings-UseCsvNoSupValue) - // is set to true, specify a string value that you want AWS DMS to use for all - // columns not included in the supplemental log. If you do not specify a string - // value, AWS DMS uses the null value for these columns regardless of the UseCsvNoSupValue + // is set to true, specify a string value that you want DMS to use for all columns + // not included in the supplemental log. 
If you do not specify a string value, + // DMS uses the null value for these columns regardless of the UseCsvNoSupValue // setting. // - // This setting is supported in AWS DMS versions 3.4.1 and later. + // This setting is supported in DMS versions 3.4.1 and later. CsvNoSupValue *string `type:"string"` // The delimiter used to separate rows in the .csv file for both source and @@ -17639,7 +17806,7 @@ type S3Settings struct { // When set to true, this parameter partitions S3 bucket folders based on transaction // commit dates. The default value is false. For more information about date-based - // folder partitoning, see Using date-based folder partitioning (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.S3.html#CHAP_Target.S3.DatePartitioning). + // folder partitioning, see Using date-based folder partitioning (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.S3.html#CHAP_Target.S3.DatePartitioning). DatePartitionEnabled *bool `type:"boolean"` // Identifies the sequence of the date format to use during folder partitioning. @@ -17680,8 +17847,8 @@ type S3Settings struct { // EncryptionMode parameter from SSE_KMS to SSE_S3. But you can’t change the // existing value from SSE_S3 to SSE_KMS. // - // To use SSE_S3, you need an AWS Identity and Access Management (IAM) role - // with permission to allow "arn:aws:s3:::dms-*" to use the following actions: + // To use SSE_S3, you need an Identity and Access Management (IAM) role with + // permission to allow "arn:aws:s3:::dms-*" to use the following actions: // // * s3:CreateBucket // @@ -17713,8 +17880,7 @@ type S3Settings struct { // value (.csv) output files only to indicate how the rows were added to the // source database. // - // AWS DMS supports the IncludeOpForFullLoad parameter in versions 3.1.4 and - // later. + // DMS supports the IncludeOpForFullLoad parameter in versions 3.1.4 and later. // // For full load, records can only be inserted. 
By default (the false setting), // no information is recorded in these output files for a full load to indicate @@ -17727,26 +17893,26 @@ type S3Settings struct { // parameters for output to .csv files only. For more information about how // these settings work together, see Indicating Source DB Operations in Migrated // S3 Data (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.S3.html#CHAP_Target.S3.Configuring.InsertOps) - // in the AWS Database Migration Service User Guide.. + // in the Database Migration Service User Guide.. IncludeOpForFullLoad *bool `type:"boolean"` // A value that specifies the precision of any TIMESTAMP column values that // are written to an Amazon S3 object file in .parquet format. // - // AWS DMS supports the ParquetTimestampInMillisecond parameter in versions - // 3.1.4 and later. + // DMS supports the ParquetTimestampInMillisecond parameter in versions 3.1.4 + // and later. // - // When ParquetTimestampInMillisecond is set to true or y, AWS DMS writes all - // TIMESTAMP columns in a .parquet formatted file with millisecond precision. - // Otherwise, DMS writes them with microsecond precision. + // When ParquetTimestampInMillisecond is set to true or y, DMS writes all TIMESTAMP + // columns in a .parquet formatted file with millisecond precision. Otherwise, + // DMS writes them with microsecond precision. // - // Currently, Amazon Athena and AWS Glue can handle only millisecond precision - // for TIMESTAMP values. Set this parameter to true for S3 endpoint object files + // Currently, Amazon Athena and Glue can handle only millisecond precision for + // TIMESTAMP values. Set this parameter to true for S3 endpoint object files // that are .parquet formatted only if you plan to query or process the data - // with Athena or AWS Glue. + // with Athena or Glue. // - // AWS DMS writes any TIMESTAMP column values written to an S3 file in .csv - // format with microsecond precision. 
+ // DMS writes any TIMESTAMP column values written to an S3 file in .csv format + // with microsecond precision. // // Setting ParquetTimestampInMillisecond has no effect on the string format // of the timestamp column value that is inserted by setting the TimestampColumnName @@ -17757,12 +17923,12 @@ type S3Settings struct { // (the default) or parquet_2_0. ParquetVersion *string `type:"string" enum:"ParquetVersionValue"` - // If set to true, AWS DMS saves the transaction order for a change data capture + // If set to true, DMS saves the transaction order for a change data capture // (CDC) load on the Amazon S3 target specified by CdcPath (https://docs.aws.amazon.com/dms/latest/APIReference/API_S3Settings.html#DMS-Type-S3Settings-CdcPath). // For more information, see Capturing data changes (CDC) including transaction // order on the S3 target (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.S3.html#CHAP_Target.S3.EndpointSettings.CdcPath). // - // This setting is supported in AWS DMS versions 3.4.2 and later. + // This setting is supported in DMS versions 3.4.2 and later. PreserveTransactions *bool `type:"boolean"` // The number of rows in a row group. A smaller row group size provides faster @@ -17774,24 +17940,23 @@ type S3Settings struct { // max row group length in bytes (64 * 1024 * 1024). RowGroupLength *int64 `type:"integer"` - // If you are using SSE_KMS for the EncryptionMode, provide the AWS KMS key - // ID. The key that you use needs an attached policy that enables AWS Identity - // and Access Management (IAM) user permissions and allows use of the key. + // If you are using SSE_KMS for the EncryptionMode, provide the KMS key ID. + // The key that you use needs an attached policy that enables Identity and Access + // Management (IAM) user permissions and allows use of the key. 
// // Here is a CLI example: aws dms create-endpoint --endpoint-identifier value // --endpoint-type target --engine-name s3 --s3-settings ServiceAccessRoleArn=value,BucketFolder=value,BucketName=value,EncryptionMode=SSE_KMS,ServerSideEncryptionKmsKeyId=value ServerSideEncryptionKmsKeyId *string `type:"string"` - // The Amazon Resource Name (ARN) used by the service access IAM role. It is - // a required parameter that enables DMS to write and read objects from an S3 - // bucket. + // The Amazon Resource Name (ARN) used by the service to access the IAM role. + // The role must allow the iam:PassRole action. It is a required parameter that + // enables DMS to write and read objects from an S3 bucket. ServiceAccessRoleArn *string `type:"string"` - // A value that when nonblank causes AWS DMS to add a column with timestamp - // information to the endpoint data for an Amazon S3 target. + // A value that when nonblank causes DMS to add a column with timestamp information + // to the endpoint data for an Amazon S3 target. // - // AWS DMS supports the TimestampColumnName parameter in versions 3.1.4 and - // later. + // DMS supports the TimestampColumnName parameter in versions 3.1.4 and later. // // DMS includes an additional STRING column in the .csv or .parquet object files // of your migrated data when you set TimestampColumnName to a nonblank value. @@ -17813,11 +17978,10 @@ type S3Settings struct { // This setting applies if the S3 output files during a change data capture // (CDC) load are written in .csv format. If set to true for columns not included - // in the supplemental log, AWS DMS uses the value specified by CsvNoSupValue - // (https://docs.aws.amazon.com/dms/latest/APIReference/API_S3Settings.html#DMS-Type-S3Settings-CsvNoSupValue). - // If not set or set to false, AWS DMS uses the null value for these columns. 
+ // in the supplemental log, DMS uses the value specified by CsvNoSupValue (https://docs.aws.amazon.com/dms/latest/APIReference/API_S3Settings.html#DMS-Type-S3Settings-CsvNoSupValue). + // If not set or set to false, DMS uses the null value for these columns. // - // This setting is supported in AWS DMS versions 3.4.1 and later. + // This setting is supported in DMS versions 3.4.1 and later. UseCsvNoSupValue *bool `type:"boolean"` } @@ -18182,27 +18346,27 @@ type StartReplicationTaskAssessmentRunInput struct { // Space-separated list of names for specific individual assessments that you // want to exclude. These names come from the default list of individual assessments - // that AWS DMS supports for the associated migration task. This task is specified + // that DMS supports for the associated migration task. This task is specified // by ReplicationTaskArn. // // You can't set a value for Exclude if you also set a value for IncludeOnly // in the API operation. // - // To identify the names of the default individual assessments that AWS DMS - // supports for the associated migration task, run the DescribeApplicableIndividualAssessments + // To identify the names of the default individual assessments that DMS supports + // for the associated migration task, run the DescribeApplicableIndividualAssessments // operation using its own ReplicationTaskArn request parameter. Exclude []*string `type:"list"` // Space-separated list of names for specific individual assessments that you // want to include. These names come from the default list of individual assessments - // that AWS DMS supports for the associated migration task. This task is specified + // that DMS supports for the associated migration task. This task is specified // by ReplicationTaskArn. // // You can't set a value for IncludeOnly if you also set a value for Exclude // in the API operation. 
// - // To identify the names of the default individual assessments that AWS DMS - // supports for the associated migration task, run the DescribeApplicableIndividualAssessments + // To identify the names of the default individual assessments that DMS supports + // for the associated migration task, run the DescribeApplicableIndividualAssessments // operation using its own ReplicationTaskArn request parameter. IncludeOnly []*string `type:"list"` @@ -18213,32 +18377,33 @@ type StartReplicationTaskAssessmentRunInput struct { ReplicationTaskArn *string `type:"string" required:"true"` // Encryption mode that you can specify to encrypt the results of this assessment - // run. If you don't specify this request parameter, AWS DMS stores the assessment + // run. If you don't specify this request parameter, DMS stores the assessment // run results without encryption. You can specify one of the options following: // // * "SSE_S3" – The server-side encryption provided as a default by Amazon // S3. // - // * "SSE_KMS" – AWS Key Management Service (AWS KMS) encryption. This - // encryption can use either a custom KMS encryption key that you specify - // or the default KMS encryption key that DMS provides. + // * "SSE_KMS" – Key Management Service (KMS) encryption. This encryption + // can use either a custom KMS encryption key that you specify or the default + // KMS encryption key that DMS provides. ResultEncryptionMode *string `type:"string"` // ARN of a custom KMS encryption key that you specify when you set ResultEncryptionMode // to "SSE_KMS". ResultKmsKeyArn *string `type:"string"` - // Amazon S3 bucket where you want AWS DMS to store the results of this assessment + // Amazon S3 bucket where you want DMS to store the results of this assessment // run. 
// // ResultLocationBucket is a required field ResultLocationBucket *string `type:"string" required:"true"` - // Folder within an Amazon S3 bucket where you want AWS DMS to store the results + // Folder within an Amazon S3 bucket where you want DMS to store the results // of this assessment run. ResultLocationFolder *string `type:"string"` - // ARN of a service role needed to start the assessment run. + // ARN of the service role needed to start the assessment run. The role must + // allow the iam:PassRole action. // // ServiceAccessRoleArn is a required field ServiceAccessRoleArn *string `type:"string" required:"true"` @@ -18372,7 +18537,7 @@ type StartReplicationTaskInput struct { // replication slot should already be created and associated with the source // endpoint. You can verify this by setting the slotName extra connection attribute // to the name of this logical replication slot. For more information, see Extra - // Connection Attributes When Using PostgreSQL as a Source for AWS DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.PostgreSQL.html#CHAP_Source.PostgreSQL.ConnectionAttrib). + // Connection Attributes When Using PostgreSQL as a Source for DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.PostgreSQL.html#CHAP_Source.PostgreSQL.ConnectionAttrib). CdcStartPosition *string `type:"string"` // Indicates the start time for a change data capture (CDC) operation. Use either @@ -18718,12 +18883,12 @@ type SupportedEndpointType struct { // "kafka", "elasticsearch", "documentdb", "sqlserver", and "neptune". EngineName *string `type:"string"` - // The earliest AWS DMS engine version that supports this endpoint engine. Note - // that endpoint engines released with AWS DMS versions earlier than 3.1.1 do - // not return a value for this parameter. + // The earliest DMS engine version that supports this endpoint engine. 
Note + // that endpoint engines released with DMS versions earlier than 3.1.1 do not + // return a value for this parameter. ReplicationInstanceEngineMinimumVersion *string `type:"string"` - // Indicates if Change Data Capture (CDC) is supported. + // Indicates if change data capture (CDC) is supported. SupportsCDC *bool `type:"boolean"` } @@ -18780,19 +18945,20 @@ type SybaseSettings struct { // Endpoint TCP port. Port *int64 `type:"integer"` - // The full Amazon Resource Name (ARN) of the IAM role that specifies AWS DMS - // as the trusted entity and grants the required permissions to access the value - // in SecretsManagerSecret. SecretsManagerSecret has the value of the AWS Secrets - // Manager secret that allows access to the SAP ASE endpoint. + // The full Amazon Resource Name (ARN) of the IAM role that specifies DMS as + // the trusted entity and grants the required permissions to access the value + // in SecretsManagerSecret. The role must allow the iam:PassRole action. SecretsManagerSecret + // has the value of the Amazon Web Services Secrets Manager secret that allows + // access to the SAP ASE endpoint. // // You can specify one of two sets of values for these permissions. You can // specify the values for this setting and SecretsManagerSecretId. Or you can // specify clear-text values for UserName, Password, ServerName, and Port. You // can't specify both. For more information on creating this SecretsManagerSecret // and the SecretsManagerAccessRoleArn and SecretsManagerSecretId required to - // access it, see Using secrets to access AWS Database Migration Service resources - // (https://docs.aws.amazon.com/https:/docs.aws.amazon.com/dms/latest/userguide/CHAP_Security.html#security-iam-secretsmanager) - // in the AWS Database Migration Service User Guide. 
+ // access it, see Using secrets to access Database Migration Service resources + // (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Security.html#security-iam-secretsmanager) + // in the Database Migration Service User Guide. SecretsManagerAccessRoleArn *string `type:"string"` // The full ARN, partial ARN, or friendly name of the SecretsManagerSecret that @@ -19137,8 +19303,8 @@ func (s *TableToReload) SetTableName(v string) *TableToReload { return s } -// A user-defined key-value pair that describes metadata added to an AWS DMS -// resource and that is used by operations such as the following: +// A user-defined key-value pair that describes metadata added to an DMS resource +// and that is used by operations such as the following: // // * AddTagsToResource // @@ -19664,6 +19830,26 @@ func ParquetVersionValue_Values() []string { } } +const ( + // PluginNameValueNoPreference is a PluginNameValue enum value + PluginNameValueNoPreference = "no-preference" + + // PluginNameValueTestDecoding is a PluginNameValue enum value + PluginNameValueTestDecoding = "test-decoding" + + // PluginNameValuePglogical is a PluginNameValue enum value + PluginNameValuePglogical = "pglogical" +) + +// PluginNameValue_Values returns all elements of the PluginNameValue enum +func PluginNameValue_Values() []string { + return []string{ + PluginNameValueNoPreference, + PluginNameValueTestDecoding, + PluginNameValuePglogical, + } +} + const ( // RefreshSchemasStatusTypeValueSuccessful is a RefreshSchemasStatusTypeValue enum value RefreshSchemasStatusTypeValueSuccessful = "successful" diff --git a/service/databasemigrationservice/doc.go b/service/databasemigrationservice/doc.go index d09037ad493..0288706c294 100644 --- a/service/databasemigrationservice/doc.go +++ b/service/databasemigrationservice/doc.go @@ -3,17 +3,15 @@ // Package databasemigrationservice provides the client and types for making API // requests to AWS Database Migration Service. 
// -// AWS Database Migration Service (AWS DMS) can migrate your data to and from -// the most widely used commercial and open-source databases such as Oracle, -// PostgreSQL, Microsoft SQL Server, Amazon Redshift, MariaDB, Amazon Aurora, -// MySQL, and SAP Adaptive Server Enterprise (ASE). The service supports homogeneous -// migrations such as Oracle to Oracle, as well as heterogeneous migrations -// between different database platforms, such as Oracle to MySQL or SQL Server -// to PostgreSQL. -// -// For more information about AWS DMS, see What Is AWS Database Migration Service? -// (https://docs.aws.amazon.com/dms/latest/userguide/Welcome.html) in the AWS -// Database Migration User Guide. +// Database Migration Service (DMS) can migrate your data to and from the most +// widely used commercial and open-source databases such as Oracle, PostgreSQL, +// Microsoft SQL Server, Amazon Redshift, MariaDB, Amazon Aurora, MySQL, and +// SAP Adaptive Server Enterprise (ASE). The service supports homogeneous migrations +// such as Oracle to Oracle, as well as heterogeneous migrations between different +// database platforms, such as Oracle to MySQL or SQL Server to PostgreSQL. +// +// For more information about DMS, see What Is Database Migration Service? (https://docs.aws.amazon.com/dms/latest/userguide/Welcome.html) +// in the Database Migration Service User Guide. // // See https://docs.aws.amazon.com/goto/WebAPI/dms-2016-01-01 for more information on this service. // diff --git a/service/databasemigrationservice/errors.go b/service/databasemigrationservice/errors.go index b02044ee188..2c83695a67a 100644 --- a/service/databasemigrationservice/errors.go +++ b/service/databasemigrationservice/errors.go @@ -11,8 +11,7 @@ const ( // ErrCodeAccessDeniedFault for service response error code // "AccessDeniedFault". // - // AWS DMS was denied access to the endpoint. Check that the role is correctly - // configured. + // DMS was denied access to the endpoint. 
Check that the role is correctly configured. ErrCodeAccessDeniedFault = "AccessDeniedFault" // ErrCodeInsufficientResourceCapacityFault for service response error code @@ -56,32 +55,31 @@ const ( // ErrCodeKMSFault for service response error code // "KMSFault". // - // An AWS Key Management Service (AWS KMS) error is preventing access to AWS - // KMS. + // An Key Management Service (KMS) error is preventing access to KMS. ErrCodeKMSFault = "KMSFault" // ErrCodeKMSInvalidStateFault for service response error code // "KMSInvalidStateFault". // - // The state of the specified AWS KMS resource isn't valid for this request. + // The state of the specified KMS resource isn't valid for this request. ErrCodeKMSInvalidStateFault = "KMSInvalidStateFault" // ErrCodeKMSKeyNotAccessibleFault for service response error code // "KMSKeyNotAccessibleFault". // - // AWS DMS cannot access the AWS KMS key. + // DMS cannot access the KMS key. ErrCodeKMSKeyNotAccessibleFault = "KMSKeyNotAccessibleFault" // ErrCodeKMSNotFoundFault for service response error code // "KMSNotFoundFault". // - // The specified AWS KMS entity or resource can't be found. + // The specified KMS entity or resource can't be found. ErrCodeKMSNotFoundFault = "KMSNotFoundFault" // ErrCodeKMSThrottlingFault for service response error code // "KMSThrottlingFault". // - // This request triggered AWS KMS request throttling. + // This request triggered KMS request throttling. ErrCodeKMSThrottlingFault = "KMSThrottlingFault" // ErrCodeReplicationSubnetGroupDoesNotCoverEnoughAZs for service response error code diff --git a/service/glue/api.go b/service/glue/api.go index 385f766ab65..0211d15ddc8 100644 --- a/service/glue/api.go +++ b/service/glue/api.go @@ -22936,6 +22936,10 @@ type CreateTriggerInput struct { // A description of the new trigger. 
Description *string `type:"string"` + // Batch condition that must be met (specified number of events received or + // batch time window expired) before EventBridge event trigger fires. + EventBatchingCondition *EventBatchingCondition `type:"structure"` + // The name of the trigger. // // Name is a required field @@ -23011,6 +23015,11 @@ func (s *CreateTriggerInput) Validate() error { } } } + if s.EventBatchingCondition != nil { + if err := s.EventBatchingCondition.Validate(); err != nil { + invalidParams.AddNested("EventBatchingCondition", err.(request.ErrInvalidParams)) + } + } if s.Predicate != nil { if err := s.Predicate.Validate(); err != nil { invalidParams.AddNested("Predicate", err.(request.ErrInvalidParams)) @@ -23035,6 +23044,12 @@ func (s *CreateTriggerInput) SetDescription(v string) *CreateTriggerInput { return s } +// SetEventBatchingCondition sets the EventBatchingCondition field's value. +func (s *CreateTriggerInput) SetEventBatchingCondition(v *EventBatchingCondition) *CreateTriggerInput { + s.EventBatchingCondition = v + return s +} + // SetName sets the Name field's value. func (s *CreateTriggerInput) SetName(v string) *CreateTriggerInput { s.Name = &v @@ -26117,8 +26132,8 @@ func (s *DynamoDBTarget) SetScanRate(v float64) *DynamoDBTarget { return s } -// An edge represents a directed connection between two Glue components that -// are part of the workflow the edge belongs to. +// An edge represents a directed connection between two components on a workflow +// graph. type Edge struct { _ struct{} `type:"structure"` @@ -26461,6 +26476,63 @@ func (s *EvaluationMetrics) SetTransformType(v string) *EvaluationMetrics { return s } +// Batch condition that must be met (specified number of events received or +// batch time window expired) before EventBridge event trigger fires. 
+type EventBatchingCondition struct { + _ struct{} `type:"structure"` + + // Number of events that must be received from Amazon EventBridge before EventBridge + // event trigger fires. + // + // BatchSize is a required field + BatchSize *int64 `min:"1" type:"integer" required:"true"` + + // Window of time in seconds after which EventBridge event trigger fires. Window + // starts when first event is received. + BatchWindow *int64 `min:"1" type:"integer"` +} + +// String returns the string representation +func (s EventBatchingCondition) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EventBatchingCondition) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *EventBatchingCondition) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "EventBatchingCondition"} + if s.BatchSize == nil { + invalidParams.Add(request.NewErrParamRequired("BatchSize")) + } + if s.BatchSize != nil && *s.BatchSize < 1 { + invalidParams.Add(request.NewErrParamMinValue("BatchSize", 1)) + } + if s.BatchWindow != nil && *s.BatchWindow < 1 { + invalidParams.Add(request.NewErrParamMinValue("BatchWindow", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBatchSize sets the BatchSize field's value. +func (s *EventBatchingCondition) SetBatchSize(v int64) *EventBatchingCondition { + s.BatchSize = &v + return s +} + +// SetBatchWindow sets the BatchWindow field's value. +func (s *EventBatchingCondition) SetBatchWindow(v int64) *EventBatchingCondition { + s.BatchWindow = &v + return s +} + // An execution property of a job. type ExecutionProperty struct { _ struct{} `type:"structure"` @@ -35412,8 +35484,8 @@ func (s *NoScheduleException) RequestID() string { return s.RespMetadata.RequestID } -// A node represents an Glue component such as a trigger, or job, etc., that -// is part of a workflow. 
+// A node represents an Glue component (trigger, crawler, or job) on a workflow +// graph. type Node struct { _ struct{} `type:"structure"` @@ -39449,6 +39521,41 @@ func (s *StartWorkflowRunOutput) SetRunId(v string) *StartWorkflowRunOutput { return s } +// The batch condition that started the workflow run. Either the number of events +// in the batch size arrived, in which case the BatchSize member is non-zero, +// or the batch window expired, in which case the BatchWindow member is non-zero. +type StartingEventBatchCondition struct { + _ struct{} `type:"structure"` + + // Number of events in the batch. + BatchSize *int64 `type:"integer"` + + // Duration of the batch window in seconds. + BatchWindow *int64 `type:"integer"` +} + +// String returns the string representation +func (s StartingEventBatchCondition) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartingEventBatchCondition) GoString() string { + return s.String() +} + +// SetBatchSize sets the BatchSize field's value. +func (s *StartingEventBatchCondition) SetBatchSize(v int64) *StartingEventBatchCondition { + s.BatchSize = &v + return s +} + +// SetBatchWindow sets the BatchWindow field's value. +func (s *StartingEventBatchCondition) SetBatchWindow(v int64) *StartingEventBatchCondition { + s.BatchWindow = &v + return s +} + type StopCrawlerInput struct { _ struct{} `type:"structure"` @@ -41246,6 +41353,10 @@ type Trigger struct { // A description of this trigger. Description *string `type:"string"` + // Batch condition that must be met (specified number of events received or + // batch time window expired) before EventBridge event trigger fires. + EventBatchingCondition *EventBatchingCondition `type:"structure"` + // Reserved for future use. Id *string `min:"1" type:"string"` @@ -41293,6 +41404,12 @@ func (s *Trigger) SetDescription(v string) *Trigger { return s } +// SetEventBatchingCondition sets the EventBatchingCondition field's value. 
+func (s *Trigger) SetEventBatchingCondition(v *EventBatchingCondition) *Trigger { + s.EventBatchingCondition = v + return s +} + // SetId sets the Id field's value. func (s *Trigger) SetId(v string) *Trigger { s.Id = &v @@ -41370,6 +41487,10 @@ type TriggerUpdate struct { // A description of this trigger. Description *string `type:"string"` + // Batch condition that must be met (specified number of events received or + // batch time window expired) before EventBridge event trigger fires. + EventBatchingCondition *EventBatchingCondition `type:"structure"` + // Reserved for future use. Name *string `min:"1" type:"string"` @@ -41409,6 +41530,11 @@ func (s *TriggerUpdate) Validate() error { } } } + if s.EventBatchingCondition != nil { + if err := s.EventBatchingCondition.Validate(); err != nil { + invalidParams.AddNested("EventBatchingCondition", err.(request.ErrInvalidParams)) + } + } if s.Predicate != nil { if err := s.Predicate.Validate(); err != nil { invalidParams.AddNested("Predicate", err.(request.ErrInvalidParams)) @@ -41433,6 +41559,12 @@ func (s *TriggerUpdate) SetDescription(v string) *TriggerUpdate { return s } +// SetEventBatchingCondition sets the EventBatchingCondition field's value. +func (s *TriggerUpdate) SetEventBatchingCondition(v *EventBatchingCondition) *TriggerUpdate { + s.EventBatchingCondition = v + return s +} + // SetName sets the Name field's value. func (s *TriggerUpdate) SetName(v string) *TriggerUpdate { s.Name = &v @@ -43987,8 +44119,9 @@ func (s *VersionMismatchException) RequestID() string { return s.RespMetadata.RequestID } -// A workflow represents a flow in which Glue components should be run to complete -// a logical task. +// A workflow is a collection of multiple dependent Glue jobs and crawlers that +// are run to complete a complex ETL task. A workflow manages the execution +// and monitoring of all its jobs and crawlers. 
type Workflow struct { _ struct{} `type:"structure"` @@ -43996,6 +44129,8 @@ type Workflow struct { CreatedOn *time.Time `type:"timestamp"` // A collection of properties to be used as part of each execution of the workflow. + // The run properties are made available to each job in the workflow. A job + // can modify the properties for the next jobs in the flow. DefaultRunProperties map[string]*string `type:"map"` // A description of the workflow. @@ -44017,7 +44152,7 @@ type Workflow struct { // blank, there is no limit to the number of concurrent workflow runs. MaxConcurrentRuns *int64 `type:"integer"` - // The name of the workflow representing the flow. + // The name of the workflow. Name *string `min:"1" type:"string"` } @@ -44140,6 +44275,9 @@ type WorkflowRun struct { // The date and time when the workflow run was started. StartedOn *time.Time `type:"timestamp"` + // The batch condition that started the workflow run. + StartingEventBatchCondition *StartingEventBatchCondition `type:"structure"` + // The statistics of the run. Statistics *WorkflowRunStatistics `type:"structure"` @@ -44199,6 +44337,12 @@ func (s *WorkflowRun) SetStartedOn(v time.Time) *WorkflowRun { return s } +// SetStartingEventBatchCondition sets the StartingEventBatchCondition field's value. +func (s *WorkflowRun) SetStartingEventBatchCondition(v *StartingEventBatchCondition) *WorkflowRun { + s.StartingEventBatchCondition = v + return s +} + // SetStatistics sets the Statistics field's value. 
func (s *WorkflowRun) SetStatistics(v *WorkflowRunStatistics) *WorkflowRun { s.Statistics = v @@ -45482,6 +45626,9 @@ const ( // TriggerTypeOnDemand is a TriggerType enum value TriggerTypeOnDemand = "ON_DEMAND" + + // TriggerTypeEvent is a TriggerType enum value + TriggerTypeEvent = "EVENT" ) // TriggerType_Values returns all elements of the TriggerType enum @@ -45490,6 +45637,7 @@ func TriggerType_Values() []string { TriggerTypeScheduled, TriggerTypeConditional, TriggerTypeOnDemand, + TriggerTypeEvent, } } diff --git a/service/healthlake/api.go b/service/healthlake/api.go index 4c32e73a703..d2ebd7e5965 100644 --- a/service/healthlake/api.go +++ b/service/healthlake/api.go @@ -10,6 +10,7 @@ import ( "github.com/aws/aws-sdk-go/aws/awsutil" "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" ) const opCreateFHIRDatastore = "CreateFHIRDatastore" @@ -73,6 +74,9 @@ func (c *HealthLake) CreateFHIRDatastoreRequest(input *CreateFHIRDatastoreInput) // The user has exceeded their maximum number of allowed calls to the given // API. // +// * AccessDeniedException +// Access is denied. Your account is not authorized to perform this operation. +// // * InternalServerException // Unknown error occurs in the service. // @@ -610,348 +614,1318 @@ func (c *HealthLake) ListFHIRDatastoresPagesWithContext(ctx aws.Context, input * return p.Err() } -const opStartFHIRExportJob = "StartFHIRExportJob" +const opListFHIRExportJobs = "ListFHIRExportJobs" -// StartFHIRExportJobRequest generates a "aws/request.Request" representing the -// client's request for the StartFHIRExportJob operation. The "output" return +// ListFHIRExportJobsRequest generates a "aws/request.Request" representing the +// client's request for the ListFHIRExportJobs operation. The "output" return // value will be populated with the request's response once the request completes // successfully. 
// // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See StartFHIRExportJob for more information on using the StartFHIRExportJob +// See ListFHIRExportJobs for more information on using the ListFHIRExportJobs // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the StartFHIRExportJobRequest method. -// req, resp := client.StartFHIRExportJobRequest(params) +// // Example sending a request using the ListFHIRExportJobsRequest method. +// req, resp := client.ListFHIRExportJobsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/healthlake-2017-07-01/StartFHIRExportJob -func (c *HealthLake) StartFHIRExportJobRequest(input *StartFHIRExportJobInput) (req *request.Request, output *StartFHIRExportJobOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/healthlake-2017-07-01/ListFHIRExportJobs +func (c *HealthLake) ListFHIRExportJobsRequest(input *ListFHIRExportJobsInput) (req *request.Request, output *ListFHIRExportJobsOutput) { op := &request.Operation{ - Name: opStartFHIRExportJob, + Name: opListFHIRExportJobs, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { - input = &StartFHIRExportJobInput{} + input = &ListFHIRExportJobsInput{} } - output = &StartFHIRExportJobOutput{} + output = &ListFHIRExportJobsOutput{} req = c.newRequest(op, input, output) return } -// StartFHIRExportJob API operation for Amazon HealthLake. +// ListFHIRExportJobs API operation for Amazon HealthLake. 
// -// Begins a FHIR export job. +// Lists all FHIR export jobs associated with an account and their statuses. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon HealthLake's -// API operation StartFHIRExportJob for usage and error information. +// API operation ListFHIRExportJobs for usage and error information. // // Returned Error Types: // * ValidationException // The user input parameter was invalid. // -// * ThrottlingException -// The user has exceeded their maximum number of allowed calls to the given -// API. +// * ResourceNotFoundException +// The requested Data Store was not found. // // * AccessDeniedException // Access is denied. Your account is not authorized to perform this operation. // -// * ResourceNotFoundException -// The requested Data Store was not found. +// * ThrottlingException +// The user has exceeded their maximum number of allowed calls to the given +// API. // // * InternalServerException // Unknown error occurs in the service. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/healthlake-2017-07-01/StartFHIRExportJob -func (c *HealthLake) StartFHIRExportJob(input *StartFHIRExportJobInput) (*StartFHIRExportJobOutput, error) { - req, out := c.StartFHIRExportJobRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/healthlake-2017-07-01/ListFHIRExportJobs +func (c *HealthLake) ListFHIRExportJobs(input *ListFHIRExportJobsInput) (*ListFHIRExportJobsOutput, error) { + req, out := c.ListFHIRExportJobsRequest(input) return out, req.Send() } -// StartFHIRExportJobWithContext is the same as StartFHIRExportJob with the addition of +// ListFHIRExportJobsWithContext is the same as ListFHIRExportJobs with the addition of // the ability to pass a context and additional request options. 
// -// See StartFHIRExportJob for details on how to use this API operation. +// See ListFHIRExportJobs for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *HealthLake) StartFHIRExportJobWithContext(ctx aws.Context, input *StartFHIRExportJobInput, opts ...request.Option) (*StartFHIRExportJobOutput, error) { - req, out := c.StartFHIRExportJobRequest(input) +func (c *HealthLake) ListFHIRExportJobsWithContext(ctx aws.Context, input *ListFHIRExportJobsInput, opts ...request.Option) (*ListFHIRExportJobsOutput, error) { + req, out := c.ListFHIRExportJobsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opStartFHIRImportJob = "StartFHIRImportJob" +// ListFHIRExportJobsPages iterates over the pages of a ListFHIRExportJobs operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListFHIRExportJobs method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListFHIRExportJobs operation. +// pageNum := 0 +// err := client.ListFHIRExportJobsPages(params, +// func(page *healthlake.ListFHIRExportJobsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *HealthLake) ListFHIRExportJobsPages(input *ListFHIRExportJobsInput, fn func(*ListFHIRExportJobsOutput, bool) bool) error { + return c.ListFHIRExportJobsPagesWithContext(aws.BackgroundContext(), input, fn) +} -// StartFHIRImportJobRequest generates a "aws/request.Request" representing the -// client's request for the StartFHIRImportJob operation. 
The "output" return +// ListFHIRExportJobsPagesWithContext same as ListFHIRExportJobsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *HealthLake) ListFHIRExportJobsPagesWithContext(ctx aws.Context, input *ListFHIRExportJobsInput, fn func(*ListFHIRExportJobsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListFHIRExportJobsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListFHIRExportJobsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListFHIRExportJobsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListFHIRImportJobs = "ListFHIRImportJobs" + +// ListFHIRImportJobsRequest generates a "aws/request.Request" representing the +// client's request for the ListFHIRImportJobs operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See StartFHIRImportJob for more information on using the StartFHIRImportJob +// See ListFHIRImportJobs for more information on using the ListFHIRImportJobs // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the StartFHIRImportJobRequest method. 
-// req, resp := client.StartFHIRImportJobRequest(params) +// // Example sending a request using the ListFHIRImportJobsRequest method. +// req, resp := client.ListFHIRImportJobsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/healthlake-2017-07-01/StartFHIRImportJob -func (c *HealthLake) StartFHIRImportJobRequest(input *StartFHIRImportJobInput) (req *request.Request, output *StartFHIRImportJobOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/healthlake-2017-07-01/ListFHIRImportJobs +func (c *HealthLake) ListFHIRImportJobsRequest(input *ListFHIRImportJobsInput) (req *request.Request, output *ListFHIRImportJobsOutput) { op := &request.Operation{ - Name: opStartFHIRImportJob, + Name: opListFHIRImportJobs, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { - input = &StartFHIRImportJobInput{} + input = &ListFHIRImportJobsInput{} } - output = &StartFHIRImportJobOutput{} + output = &ListFHIRImportJobsOutput{} req = c.newRequest(op, input, output) return } -// StartFHIRImportJob API operation for Amazon HealthLake. +// ListFHIRImportJobs API operation for Amazon HealthLake. // -// Begins a FHIR Import job. +// Lists all FHIR import jobs associated with an account and their statuses. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon HealthLake's -// API operation StartFHIRImportJob for usage and error information. +// API operation ListFHIRImportJobs for usage and error information. // // Returned Error Types: // * ValidationException // The user input parameter was invalid. 
// -// * ThrottlingException -// The user has exceeded their maximum number of allowed calls to the given -// API. +// * ResourceNotFoundException +// The requested Data Store was not found. // // * AccessDeniedException // Access is denied. Your account is not authorized to perform this operation. // -// * ResourceNotFoundException -// The requested Data Store was not found. +// * ThrottlingException +// The user has exceeded their maximum number of allowed calls to the given +// API. // // * InternalServerException // Unknown error occurs in the service. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/healthlake-2017-07-01/StartFHIRImportJob -func (c *HealthLake) StartFHIRImportJob(input *StartFHIRImportJobInput) (*StartFHIRImportJobOutput, error) { - req, out := c.StartFHIRImportJobRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/healthlake-2017-07-01/ListFHIRImportJobs +func (c *HealthLake) ListFHIRImportJobs(input *ListFHIRImportJobsInput) (*ListFHIRImportJobsOutput, error) { + req, out := c.ListFHIRImportJobsRequest(input) return out, req.Send() } -// StartFHIRImportJobWithContext is the same as StartFHIRImportJob with the addition of +// ListFHIRImportJobsWithContext is the same as ListFHIRImportJobs with the addition of // the ability to pass a context and additional request options. // -// See StartFHIRImportJob for details on how to use this API operation. +// See ListFHIRImportJobs for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
-func (c *HealthLake) StartFHIRImportJobWithContext(ctx aws.Context, input *StartFHIRImportJobInput, opts ...request.Option) (*StartFHIRImportJobOutput, error) { - req, out := c.StartFHIRImportJobRequest(input) +func (c *HealthLake) ListFHIRImportJobsWithContext(ctx aws.Context, input *ListFHIRImportJobsInput, opts ...request.Option) (*ListFHIRImportJobsOutput, error) { + req, out := c.ListFHIRImportJobsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// Access is denied. Your account is not authorized to perform this operation. -type AccessDeniedException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"Message" type:"string"` -} - -// String returns the string representation -func (s AccessDeniedException) String() string { - return awsutil.Prettify(s) +// ListFHIRImportJobsPages iterates over the pages of a ListFHIRImportJobs operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListFHIRImportJobs method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListFHIRImportJobs operation. 
+// pageNum := 0 +// err := client.ListFHIRImportJobsPages(params, +// func(page *healthlake.ListFHIRImportJobsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *HealthLake) ListFHIRImportJobsPages(input *ListFHIRImportJobsInput, fn func(*ListFHIRImportJobsOutput, bool) bool) error { + return c.ListFHIRImportJobsPagesWithContext(aws.BackgroundContext(), input, fn) } -// GoString returns the string representation -func (s AccessDeniedException) GoString() string { - return s.String() -} +// ListFHIRImportJobsPagesWithContext same as ListFHIRImportJobsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *HealthLake) ListFHIRImportJobsPagesWithContext(ctx aws.Context, input *ListFHIRImportJobsInput, fn func(*ListFHIRImportJobsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListFHIRImportJobsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListFHIRImportJobsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } -func newErrorAccessDeniedException(v protocol.ResponseMetadata) error { - return &AccessDeniedException{ - RespMetadata: v, + for p.Next() { + if !fn(p.Page().(*ListFHIRImportJobsOutput), !p.HasNextPage()) { + break + } } -} -// Code returns the exception type name. -func (s *AccessDeniedException) Code() string { - return "AccessDeniedException" + return p.Err() } -// Message returns the exception's message. 
-func (s *AccessDeniedException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} +const opListTagsForResource = "ListTagsForResource" -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *AccessDeniedException) OrigErr() error { - return nil -} +// ListTagsForResourceRequest generates a "aws/request.Request" representing the +// client's request for the ListTagsForResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListTagsForResource for more information on using the ListTagsForResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListTagsForResourceRequest method. +// req, resp := client.ListTagsForResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/healthlake-2017-07-01/ListTagsForResource +func (c *HealthLake) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) { + op := &request.Operation{ + Name: opListTagsForResource, + HTTPMethod: "POST", + HTTPPath: "/", + } -func (s *AccessDeniedException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} + if input == nil { + input = &ListTagsForResourceInput{} + } -// Status code returns the HTTP status code for the request's response error. 
-func (s *AccessDeniedException) StatusCode() int { - return s.RespMetadata.StatusCode + output = &ListTagsForResourceOutput{} + req = c.newRequest(op, input, output) + return } -// RequestID returns the service's response RequestID for request. -func (s *AccessDeniedException) RequestID() string { - return s.RespMetadata.RequestID +// ListTagsForResource API operation for Amazon HealthLake. +// +// Returns a list of all existing tags associated with a Data Store. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon HealthLake's +// API operation ListTagsForResource for usage and error information. +// +// Returned Error Types: +// * ValidationException +// The user input parameter was invalid. +// +// * ResourceNotFoundException +// The requested Data Store was not found. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/healthlake-2017-07-01/ListTagsForResource +func (c *HealthLake) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + return out, req.Send() } -// The Data Store is in a transition state and the user requested action can -// not be performed. -type ConflictException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"Message" type:"string"` +// ListTagsForResourceWithContext is the same as ListTagsForResource with the addition of +// the ability to pass a context and additional request options. +// +// See ListTagsForResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *HealthLake) ListTagsForResourceWithContext(ctx aws.Context, input *ListTagsForResourceInput, opts ...request.Option) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } -// String returns the string representation -func (s ConflictException) String() string { - return awsutil.Prettify(s) -} +const opStartFHIRExportJob = "StartFHIRExportJob" + +// StartFHIRExportJobRequest generates a "aws/request.Request" representing the +// client's request for the StartFHIRExportJob operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See StartFHIRExportJob for more information on using the StartFHIRExportJob +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the StartFHIRExportJobRequest method. 
+// req, resp := client.StartFHIRExportJobRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/healthlake-2017-07-01/StartFHIRExportJob +func (c *HealthLake) StartFHIRExportJobRequest(input *StartFHIRExportJobInput) (req *request.Request, output *StartFHIRExportJobOutput) { + op := &request.Operation{ + Name: opStartFHIRExportJob, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StartFHIRExportJobInput{} + } + + output = &StartFHIRExportJobOutput{} + req = c.newRequest(op, input, output) + return +} + +// StartFHIRExportJob API operation for Amazon HealthLake. +// +// Begins a FHIR export job. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon HealthLake's +// API operation StartFHIRExportJob for usage and error information. +// +// Returned Error Types: +// * ValidationException +// The user input parameter was invalid. +// +// * ThrottlingException +// The user has exceeded their maximum number of allowed calls to the given +// API. +// +// * AccessDeniedException +// Access is denied. Your account is not authorized to perform this operation. +// +// * ResourceNotFoundException +// The requested Data Store was not found. +// +// * InternalServerException +// Unknown error occurs in the service. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/healthlake-2017-07-01/StartFHIRExportJob +func (c *HealthLake) StartFHIRExportJob(input *StartFHIRExportJobInput) (*StartFHIRExportJobOutput, error) { + req, out := c.StartFHIRExportJobRequest(input) + return out, req.Send() +} + +// StartFHIRExportJobWithContext is the same as StartFHIRExportJob with the addition of +// the ability to pass a context and additional request options. 
+// +// See StartFHIRExportJob for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *HealthLake) StartFHIRExportJobWithContext(ctx aws.Context, input *StartFHIRExportJobInput, opts ...request.Option) (*StartFHIRExportJobOutput, error) { + req, out := c.StartFHIRExportJobRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opStartFHIRImportJob = "StartFHIRImportJob" + +// StartFHIRImportJobRequest generates a "aws/request.Request" representing the +// client's request for the StartFHIRImportJob operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See StartFHIRImportJob for more information on using the StartFHIRImportJob +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the StartFHIRImportJobRequest method. 
+// req, resp := client.StartFHIRImportJobRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/healthlake-2017-07-01/StartFHIRImportJob +func (c *HealthLake) StartFHIRImportJobRequest(input *StartFHIRImportJobInput) (req *request.Request, output *StartFHIRImportJobOutput) { + op := &request.Operation{ + Name: opStartFHIRImportJob, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StartFHIRImportJobInput{} + } + + output = &StartFHIRImportJobOutput{} + req = c.newRequest(op, input, output) + return +} + +// StartFHIRImportJob API operation for Amazon HealthLake. +// +// Begins a FHIR Import job. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon HealthLake's +// API operation StartFHIRImportJob for usage and error information. +// +// Returned Error Types: +// * ValidationException +// The user input parameter was invalid. +// +// * ThrottlingException +// The user has exceeded their maximum number of allowed calls to the given +// API. +// +// * AccessDeniedException +// Access is denied. Your account is not authorized to perform this operation. +// +// * ResourceNotFoundException +// The requested Data Store was not found. +// +// * InternalServerException +// Unknown error occurs in the service. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/healthlake-2017-07-01/StartFHIRImportJob +func (c *HealthLake) StartFHIRImportJob(input *StartFHIRImportJobInput) (*StartFHIRImportJobOutput, error) { + req, out := c.StartFHIRImportJobRequest(input) + return out, req.Send() +} + +// StartFHIRImportJobWithContext is the same as StartFHIRImportJob with the addition of +// the ability to pass a context and additional request options. 
+// +// See StartFHIRImportJob for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *HealthLake) StartFHIRImportJobWithContext(ctx aws.Context, input *StartFHIRImportJobInput, opts ...request.Option) (*StartFHIRImportJobOutput, error) { + req, out := c.StartFHIRImportJobRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opTagResource = "TagResource" + +// TagResourceRequest generates a "aws/request.Request" representing the +// client's request for the TagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See TagResource for more information on using the TagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the TagResourceRequest method. 
+// req, resp := client.TagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/healthlake-2017-07-01/TagResource +func (c *HealthLake) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) { + op := &request.Operation{ + Name: opTagResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &TagResourceInput{} + } + + output = &TagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// TagResource API operation for Amazon HealthLake. +// +// Adds a user specifed key and value tag to a Data Store. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon HealthLake's +// API operation TagResource for usage and error information. +// +// Returned Error Types: +// * ValidationException +// The user input parameter was invalid. +// +// * ResourceNotFoundException +// The requested Data Store was not found. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/healthlake-2017-07-01/TagResource +func (c *HealthLake) TagResource(input *TagResourceInput) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + return out, req.Send() +} + +// TagResourceWithContext is the same as TagResource with the addition of +// the ability to pass a context and additional request options. +// +// See TagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *HealthLake) TagResourceWithContext(ctx aws.Context, input *TagResourceInput, opts ...request.Option) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUntagResource = "UntagResource" + +// UntagResourceRequest generates a "aws/request.Request" representing the +// client's request for the UntagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UntagResource for more information on using the UntagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UntagResourceRequest method. +// req, resp := client.UntagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/healthlake-2017-07-01/UntagResource +func (c *HealthLake) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) { + op := &request.Operation{ + Name: opUntagResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UntagResourceInput{} + } + + output = &UntagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UntagResource API operation for Amazon HealthLake. +// +// Removes tags from a Data Store. 
+// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon HealthLake's +// API operation UntagResource for usage and error information. +// +// Returned Error Types: +// * ValidationException +// The user input parameter was invalid. +// +// * ResourceNotFoundException +// The requested Data Store was not found. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/healthlake-2017-07-01/UntagResource +func (c *HealthLake) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + return out, req.Send() +} + +// UntagResourceWithContext is the same as UntagResource with the addition of +// the ability to pass a context and additional request options. +// +// See UntagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *HealthLake) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// Access is denied. Your account is not authorized to perform this operation. 
+type AccessDeniedException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" type:"string"` +} + +// String returns the string representation +func (s AccessDeniedException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccessDeniedException) GoString() string { + return s.String() +} + +func newErrorAccessDeniedException(v protocol.ResponseMetadata) error { + return &AccessDeniedException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *AccessDeniedException) Code() string { + return "AccessDeniedException" +} + +// Message returns the exception's message. +func (s *AccessDeniedException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *AccessDeniedException) OrigErr() error { + return nil +} + +func (s *AccessDeniedException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *AccessDeniedException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *AccessDeniedException) RequestID() string { + return s.RespMetadata.RequestID +} + +// The Data Store is in a transition state and the user requested action can +// not be performed. 
+type ConflictException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" type:"string"` +} + +// String returns the string representation +func (s ConflictException) String() string { + return awsutil.Prettify(s) +} // GoString returns the string representation func (s ConflictException) GoString() string { return s.String() } -func newErrorConflictException(v protocol.ResponseMetadata) error { - return &ConflictException{ - RespMetadata: v, - } +func newErrorConflictException(v protocol.ResponseMetadata) error { + return &ConflictException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ConflictException) Code() string { + return "ConflictException" +} + +// Message returns the exception's message. +func (s *ConflictException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ConflictException) OrigErr() error { + return nil +} + +func (s *ConflictException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ConflictException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ConflictException) RequestID() string { + return s.RespMetadata.RequestID +} + +type CreateFHIRDatastoreInput struct { + _ struct{} `type:"structure"` + + // Optional user provided token used for ensuring idempotency. + ClientToken *string `min:"1" type:"string" idempotencyToken:"true"` + + // The user generated name for the Data Store. + DatastoreName *string `min:"1" type:"string"` + + // The FHIR version of the Data Store. The only supported version is R4. 
+ // + // DatastoreTypeVersion is a required field + DatastoreTypeVersion *string `type:"string" required:"true" enum:"FHIRVersion"` + + // Optional parameter to preload data upon creation of the Data Store. Currently, + // the only supported preloaded data is synthetic data generated from Synthea. + PreloadDataConfig *PreloadDataConfig `type:"structure"` + + // The server-side encryption key configuration for a customer provided encryption + // key specified for creating a Data Store. + SseConfiguration *SseConfiguration `type:"structure"` + + // Resource tags that are applied to a Data Store when it is created. + Tags []*Tag `type:"list"` +} + +// String returns the string representation +func (s CreateFHIRDatastoreInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateFHIRDatastoreInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateFHIRDatastoreInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateFHIRDatastoreInput"} + if s.ClientToken != nil && len(*s.ClientToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClientToken", 1)) + } + if s.DatastoreName != nil && len(*s.DatastoreName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DatastoreName", 1)) + } + if s.DatastoreTypeVersion == nil { + invalidParams.Add(request.NewErrParamRequired("DatastoreTypeVersion")) + } + if s.PreloadDataConfig != nil { + if err := s.PreloadDataConfig.Validate(); err != nil { + invalidParams.AddNested("PreloadDataConfig", err.(request.ErrInvalidParams)) + } + } + if s.SseConfiguration != nil { + if err := s.SseConfiguration.Validate(); err != nil { + invalidParams.AddNested("SseConfiguration", err.(request.ErrInvalidParams)) + } + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + 
invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientToken sets the ClientToken field's value. +func (s *CreateFHIRDatastoreInput) SetClientToken(v string) *CreateFHIRDatastoreInput { + s.ClientToken = &v + return s +} + +// SetDatastoreName sets the DatastoreName field's value. +func (s *CreateFHIRDatastoreInput) SetDatastoreName(v string) *CreateFHIRDatastoreInput { + s.DatastoreName = &v + return s +} + +// SetDatastoreTypeVersion sets the DatastoreTypeVersion field's value. +func (s *CreateFHIRDatastoreInput) SetDatastoreTypeVersion(v string) *CreateFHIRDatastoreInput { + s.DatastoreTypeVersion = &v + return s +} + +// SetPreloadDataConfig sets the PreloadDataConfig field's value. +func (s *CreateFHIRDatastoreInput) SetPreloadDataConfig(v *PreloadDataConfig) *CreateFHIRDatastoreInput { + s.PreloadDataConfig = v + return s +} + +// SetSseConfiguration sets the SseConfiguration field's value. +func (s *CreateFHIRDatastoreInput) SetSseConfiguration(v *SseConfiguration) *CreateFHIRDatastoreInput { + s.SseConfiguration = v + return s +} + +// SetTags sets the Tags field's value. +func (s *CreateFHIRDatastoreInput) SetTags(v []*Tag) *CreateFHIRDatastoreInput { + s.Tags = v + return s +} + +type CreateFHIRDatastoreOutput struct { + _ struct{} `type:"structure"` + + // The datastore ARN is generated during the creation of the Data Store and + // can be found in the output from the initial Data Store creation call. + // + // DatastoreArn is a required field + DatastoreArn *string `type:"string" required:"true"` + + // The AWS endpoint for the created Data Store. For preview, only US-east-1 + // endpoints are supported. + // + // DatastoreEndpoint is a required field + DatastoreEndpoint *string `min:"1" type:"string" required:"true"` + + // The AWS-generated Data Store id. 
This id is in the output from the initial + // Data Store creation call. + // + // DatastoreId is a required field + DatastoreId *string `min:"1" type:"string" required:"true"` + + // The status of the FHIR Data Store. Possible statuses are ‘CREATING’, + // ‘ACTIVE’, ‘DELETING’, ‘DELETED’. + // + // DatastoreStatus is a required field + DatastoreStatus *string `type:"string" required:"true" enum:"DatastoreStatus"` +} + +// String returns the string representation +func (s CreateFHIRDatastoreOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateFHIRDatastoreOutput) GoString() string { + return s.String() +} + +// SetDatastoreArn sets the DatastoreArn field's value. +func (s *CreateFHIRDatastoreOutput) SetDatastoreArn(v string) *CreateFHIRDatastoreOutput { + s.DatastoreArn = &v + return s +} + +// SetDatastoreEndpoint sets the DatastoreEndpoint field's value. +func (s *CreateFHIRDatastoreOutput) SetDatastoreEndpoint(v string) *CreateFHIRDatastoreOutput { + s.DatastoreEndpoint = &v + return s +} + +// SetDatastoreId sets the DatastoreId field's value. +func (s *CreateFHIRDatastoreOutput) SetDatastoreId(v string) *CreateFHIRDatastoreOutput { + s.DatastoreId = &v + return s +} + +// SetDatastoreStatus sets the DatastoreStatus field's value. +func (s *CreateFHIRDatastoreOutput) SetDatastoreStatus(v string) *CreateFHIRDatastoreOutput { + s.DatastoreStatus = &v + return s +} + +// The filters applied to Data Store query. +type DatastoreFilter struct { + _ struct{} `type:"structure"` + + // A filter that allows the user to set cutoff dates for records. All Data Stores + // created after the specified date will be included in the results. + CreatedAfter *time.Time `type:"timestamp"` + + // A filter that allows the user to set cutoff dates for records. All Data Stores + // created before the specified date will be included in the results. 
+ CreatedBefore *time.Time `type:"timestamp"` + + // Allows the user to filter Data Store results by name. + DatastoreName *string `min:"1" type:"string"` + + // Allows the user to filter Data Store results by status. + DatastoreStatus *string `type:"string" enum:"DatastoreStatus"` +} + +// String returns the string representation +func (s DatastoreFilter) String() string { + return awsutil.Prettify(s) } -// Code returns the exception type name. -func (s *ConflictException) Code() string { - return "ConflictException" +// GoString returns the string representation +func (s DatastoreFilter) GoString() string { + return s.String() } -// Message returns the exception's message. -func (s *ConflictException) Message() string { - if s.Message_ != nil { - return *s.Message_ +// Validate inspects the fields of the type to determine if they are valid. +func (s *DatastoreFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DatastoreFilter"} + if s.DatastoreName != nil && len(*s.DatastoreName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DatastoreName", 1)) } - return "" -} -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *ConflictException) OrigErr() error { + if invalidParams.Len() > 0 { + return invalidParams + } return nil } -func (s *ConflictException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +// SetCreatedAfter sets the CreatedAfter field's value. +func (s *DatastoreFilter) SetCreatedAfter(v time.Time) *DatastoreFilter { + s.CreatedAfter = &v + return s } -// Status code returns the HTTP status code for the request's response error. -func (s *ConflictException) StatusCode() int { - return s.RespMetadata.StatusCode +// SetCreatedBefore sets the CreatedBefore field's value. +func (s *DatastoreFilter) SetCreatedBefore(v time.Time) *DatastoreFilter { + s.CreatedBefore = &v + return s } -// RequestID returns the service's response RequestID for request. 
-func (s *ConflictException) RequestID() string { - return s.RespMetadata.RequestID +// SetDatastoreName sets the DatastoreName field's value. +func (s *DatastoreFilter) SetDatastoreName(v string) *DatastoreFilter { + s.DatastoreName = &v + return s } -type CreateFHIRDatastoreInput struct { +// SetDatastoreStatus sets the DatastoreStatus field's value. +func (s *DatastoreFilter) SetDatastoreStatus(v string) *DatastoreFilter { + s.DatastoreStatus = &v + return s +} + +// Displays the properties of the Data Store, including the ID, Arn, name, and +// the status of the Data Store. +type DatastoreProperties struct { _ struct{} `type:"structure"` - // Optional user provided token used for ensuring idempotency. - ClientToken *string `min:"1" type:"string" idempotencyToken:"true"` + // The time that a Data Store was created. + CreatedAt *time.Time `type:"timestamp"` - // The user generated name for the Data Store. + // The Amazon Resource Name used in the creation of the Data Store. + // + // DatastoreArn is a required field + DatastoreArn *string `type:"string" required:"true"` + + // The AWS endpoint for the Data Store. Each Data Store will have it's own endpoint + // with Data Store ID in the endpoint URL. + // + // DatastoreEndpoint is a required field + DatastoreEndpoint *string `type:"string" required:"true"` + + // The AWS-generated ID number for the Data Store. + // + // DatastoreId is a required field + DatastoreId *string `min:"1" type:"string" required:"true"` + + // The user-generated name for the Data Store. DatastoreName *string `min:"1" type:"string"` - // The FHIR version of the Data Store. The only supported version is R4. + // The status of the Data Store. Possible statuses are 'CREATING', 'ACTIVE', + // 'DELETING', or 'DELETED'. + // + // DatastoreStatus is a required field + DatastoreStatus *string `type:"string" required:"true" enum:"DatastoreStatus"` + + // The FHIR version. Only R4 version data is supported. 
// // DatastoreTypeVersion is a required field DatastoreTypeVersion *string `type:"string" required:"true" enum:"FHIRVersion"` - // Optional parameter to preload data upon creation of the Data Store. Currently, - // the only supported preloaded data is synthetic data generated from Synthea. + // The preloaded data configuration for the Data Store. Only data preloaded + // from Synthea is supported. PreloadDataConfig *PreloadDataConfig `type:"structure"` + + // The server-side encryption key configuration for a customer provided encryption + // key (CMK). + SseConfiguration *SseConfiguration `type:"structure"` +} + +// String returns the string representation +func (s DatastoreProperties) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DatastoreProperties) GoString() string { + return s.String() +} + +// SetCreatedAt sets the CreatedAt field's value. +func (s *DatastoreProperties) SetCreatedAt(v time.Time) *DatastoreProperties { + s.CreatedAt = &v + return s +} + +// SetDatastoreArn sets the DatastoreArn field's value. +func (s *DatastoreProperties) SetDatastoreArn(v string) *DatastoreProperties { + s.DatastoreArn = &v + return s +} + +// SetDatastoreEndpoint sets the DatastoreEndpoint field's value. +func (s *DatastoreProperties) SetDatastoreEndpoint(v string) *DatastoreProperties { + s.DatastoreEndpoint = &v + return s +} + +// SetDatastoreId sets the DatastoreId field's value. +func (s *DatastoreProperties) SetDatastoreId(v string) *DatastoreProperties { + s.DatastoreId = &v + return s +} + +// SetDatastoreName sets the DatastoreName field's value. +func (s *DatastoreProperties) SetDatastoreName(v string) *DatastoreProperties { + s.DatastoreName = &v + return s +} + +// SetDatastoreStatus sets the DatastoreStatus field's value. 
+func (s *DatastoreProperties) SetDatastoreStatus(v string) *DatastoreProperties { + s.DatastoreStatus = &v + return s +} + +// SetDatastoreTypeVersion sets the DatastoreTypeVersion field's value. +func (s *DatastoreProperties) SetDatastoreTypeVersion(v string) *DatastoreProperties { + s.DatastoreTypeVersion = &v + return s +} + +// SetPreloadDataConfig sets the PreloadDataConfig field's value. +func (s *DatastoreProperties) SetPreloadDataConfig(v *PreloadDataConfig) *DatastoreProperties { + s.PreloadDataConfig = v + return s +} + +// SetSseConfiguration sets the SseConfiguration field's value. +func (s *DatastoreProperties) SetSseConfiguration(v *SseConfiguration) *DatastoreProperties { + s.SseConfiguration = v + return s +} + +type DeleteFHIRDatastoreInput struct { + _ struct{} `type:"structure"` + + // The AWS-generated ID for the Data Store to be deleted. + DatastoreId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DeleteFHIRDatastoreInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteFHIRDatastoreInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteFHIRDatastoreInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteFHIRDatastoreInput"} + if s.DatastoreId != nil && len(*s.DatastoreId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DatastoreId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDatastoreId sets the DatastoreId field's value. +func (s *DeleteFHIRDatastoreInput) SetDatastoreId(v string) *DeleteFHIRDatastoreInput { + s.DatastoreId = &v + return s +} + +type DeleteFHIRDatastoreOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) that gives Amazon HealthLake access permission. 
+ // + // DatastoreArn is a required field + DatastoreArn *string `type:"string" required:"true"` + + // The AWS endpoint for the Data Store the user has requested to be deleted. + // + // DatastoreEndpoint is a required field + DatastoreEndpoint *string `min:"1" type:"string" required:"true"` + + // The AWS-generated ID for the Data Store to be deleted. + // + // DatastoreId is a required field + DatastoreId *string `min:"1" type:"string" required:"true"` + + // The status of the Data Store that the user has requested to be deleted. + // + // DatastoreStatus is a required field + DatastoreStatus *string `type:"string" required:"true" enum:"DatastoreStatus"` +} + +// String returns the string representation +func (s DeleteFHIRDatastoreOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteFHIRDatastoreOutput) GoString() string { + return s.String() +} + +// SetDatastoreArn sets the DatastoreArn field's value. +func (s *DeleteFHIRDatastoreOutput) SetDatastoreArn(v string) *DeleteFHIRDatastoreOutput { + s.DatastoreArn = &v + return s +} + +// SetDatastoreEndpoint sets the DatastoreEndpoint field's value. +func (s *DeleteFHIRDatastoreOutput) SetDatastoreEndpoint(v string) *DeleteFHIRDatastoreOutput { + s.DatastoreEndpoint = &v + return s +} + +// SetDatastoreId sets the DatastoreId field's value. +func (s *DeleteFHIRDatastoreOutput) SetDatastoreId(v string) *DeleteFHIRDatastoreOutput { + s.DatastoreId = &v + return s +} + +// SetDatastoreStatus sets the DatastoreStatus field's value. +func (s *DeleteFHIRDatastoreOutput) SetDatastoreStatus(v string) *DeleteFHIRDatastoreOutput { + s.DatastoreStatus = &v + return s +} + +type DescribeFHIRDatastoreInput struct { + _ struct{} `type:"structure"` + + // The AWS-generated Data Store id. This is part of the ‘CreateFHIRDatastore’ + // output. 
+ DatastoreId *string `min:"1" type:"string"` } // String returns the string representation -func (s CreateFHIRDatastoreInput) String() string { +func (s DescribeFHIRDatastoreInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateFHIRDatastoreInput) GoString() string { +func (s DescribeFHIRDatastoreInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *CreateFHIRDatastoreInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateFHIRDatastoreInput"} - if s.ClientToken != nil && len(*s.ClientToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ClientToken", 1)) - } - if s.DatastoreName != nil && len(*s.DatastoreName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DatastoreName", 1)) - } - if s.DatastoreTypeVersion == nil { - invalidParams.Add(request.NewErrParamRequired("DatastoreTypeVersion")) - } - if s.PreloadDataConfig != nil { - if err := s.PreloadDataConfig.Validate(); err != nil { - invalidParams.AddNested("PreloadDataConfig", err.(request.ErrInvalidParams)) - } +func (s *DescribeFHIRDatastoreInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeFHIRDatastoreInput"} + if s.DatastoreId != nil && len(*s.DatastoreId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DatastoreId", 1)) } if invalidParams.Len() > 0 { @@ -960,126 +1934,162 @@ func (s *CreateFHIRDatastoreInput) Validate() error { return nil } -// SetClientToken sets the ClientToken field's value. -func (s *CreateFHIRDatastoreInput) SetClientToken(v string) *CreateFHIRDatastoreInput { - s.ClientToken = &v +// SetDatastoreId sets the DatastoreId field's value. +func (s *DescribeFHIRDatastoreInput) SetDatastoreId(v string) *DescribeFHIRDatastoreInput { + s.DatastoreId = &v return s } -// SetDatastoreName sets the DatastoreName field's value. 
-func (s *CreateFHIRDatastoreInput) SetDatastoreName(v string) *CreateFHIRDatastoreInput { - s.DatastoreName = &v - return s +type DescribeFHIRDatastoreOutput struct { + _ struct{} `type:"structure"` + + // All properties associated with a Data Store, including the Data Store ID, + // Data Store ARN, Data Store name, Data Store status, created at, Data Store + // type version, and Data Store endpoint. + // + // DatastoreProperties is a required field + DatastoreProperties *DatastoreProperties `type:"structure" required:"true"` } -// SetDatastoreTypeVersion sets the DatastoreTypeVersion field's value. -func (s *CreateFHIRDatastoreInput) SetDatastoreTypeVersion(v string) *CreateFHIRDatastoreInput { - s.DatastoreTypeVersion = &v - return s +// String returns the string representation +func (s DescribeFHIRDatastoreOutput) String() string { + return awsutil.Prettify(s) } -// SetPreloadDataConfig sets the PreloadDataConfig field's value. -func (s *CreateFHIRDatastoreInput) SetPreloadDataConfig(v *PreloadDataConfig) *CreateFHIRDatastoreInput { - s.PreloadDataConfig = v +// GoString returns the string representation +func (s DescribeFHIRDatastoreOutput) GoString() string { + return s.String() +} + +// SetDatastoreProperties sets the DatastoreProperties field's value. +func (s *DescribeFHIRDatastoreOutput) SetDatastoreProperties(v *DatastoreProperties) *DescribeFHIRDatastoreOutput { + s.DatastoreProperties = v return s } -type CreateFHIRDatastoreOutput struct { +type DescribeFHIRExportJobInput struct { _ struct{} `type:"structure"` - // The datastore ARN is generated during the creation of the Data Store and - // can be found in the output from the initial Data Store creation call. - // - // DatastoreArn is a required field - DatastoreArn *string `type:"string" required:"true"` - - // The AWS endpoint for the created Data Store. For preview, only US-east-1 - // endpoints are supported. 
- // - // DatastoreEndpoint is a required field - DatastoreEndpoint *string `min:"1" type:"string" required:"true"` - - // The AWS-generated Data Store id. This id is in the output from the initial - // Data Store creation call. + // The AWS generated ID for the Data Store from which files are being exported + // from for an export job. // // DatastoreId is a required field DatastoreId *string `min:"1" type:"string" required:"true"` - // The status of the FHIR Data Store. Possible statuses are ‘CREATING’, - // ‘ACTIVE’, ‘DELETING’, ‘DELETED’. + // The AWS generated ID for an export job. // - // DatastoreStatus is a required field - DatastoreStatus *string `type:"string" required:"true" enum:"DatastoreStatus"` + // JobId is a required field + JobId *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s CreateFHIRDatastoreOutput) String() string { +func (s DescribeFHIRExportJobInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateFHIRDatastoreOutput) GoString() string { +func (s DescribeFHIRExportJobInput) GoString() string { return s.String() } -// SetDatastoreArn sets the DatastoreArn field's value. -func (s *CreateFHIRDatastoreOutput) SetDatastoreArn(v string) *CreateFHIRDatastoreOutput { - s.DatastoreArn = &v - return s -} +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribeFHIRExportJobInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeFHIRExportJobInput"} + if s.DatastoreId == nil { + invalidParams.Add(request.NewErrParamRequired("DatastoreId")) + } + if s.DatastoreId != nil && len(*s.DatastoreId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DatastoreId", 1)) + } + if s.JobId == nil { + invalidParams.Add(request.NewErrParamRequired("JobId")) + } + if s.JobId != nil && len(*s.JobId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("JobId", 1)) + } -// SetDatastoreEndpoint sets the DatastoreEndpoint field's value. -func (s *CreateFHIRDatastoreOutput) SetDatastoreEndpoint(v string) *CreateFHIRDatastoreOutput { - s.DatastoreEndpoint = &v - return s + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } // SetDatastoreId sets the DatastoreId field's value. -func (s *CreateFHIRDatastoreOutput) SetDatastoreId(v string) *CreateFHIRDatastoreOutput { +func (s *DescribeFHIRExportJobInput) SetDatastoreId(v string) *DescribeFHIRExportJobInput { s.DatastoreId = &v return s } -// SetDatastoreStatus sets the DatastoreStatus field's value. -func (s *CreateFHIRDatastoreOutput) SetDatastoreStatus(v string) *CreateFHIRDatastoreOutput { - s.DatastoreStatus = &v +// SetJobId sets the JobId field's value. +func (s *DescribeFHIRExportJobInput) SetJobId(v string) *DescribeFHIRExportJobInput { + s.JobId = &v return s } -// The filters applied to Data Store query. -type DatastoreFilter struct { +type DescribeFHIRExportJobOutput struct { _ struct{} `type:"structure"` - // A filter that allows the user to set cutoff dates for records. All Data Stores - // created after the specified date will be included in the results. - CreatedAfter *time.Time `type:"timestamp"` + // Displays the properties of the export job, including the ID, Arn, Name, and + // the status of the job. 
+ // + // ExportJobProperties is a required field + ExportJobProperties *ExportJobProperties `type:"structure" required:"true"` +} - // A filter that allows the user to set cutoff dates for records. All Data Stores - // created before the specified date will be included in the results. - CreatedBefore *time.Time `type:"timestamp"` +// String returns the string representation +func (s DescribeFHIRExportJobOutput) String() string { + return awsutil.Prettify(s) +} - // Allows the user to filter Data Store results by name. - DatastoreName *string `min:"1" type:"string"` +// GoString returns the string representation +func (s DescribeFHIRExportJobOutput) GoString() string { + return s.String() +} - // Allows the user to filter Data Store results by status. - DatastoreStatus *string `type:"string" enum:"DatastoreStatus"` +// SetExportJobProperties sets the ExportJobProperties field's value. +func (s *DescribeFHIRExportJobOutput) SetExportJobProperties(v *ExportJobProperties) *DescribeFHIRExportJobOutput { + s.ExportJobProperties = v + return s +} + +type DescribeFHIRImportJobInput struct { + _ struct{} `type:"structure"` + + // The AWS-generated ID of the Data Store. + // + // DatastoreId is a required field + DatastoreId *string `min:"1" type:"string" required:"true"` + + // The AWS-generated job ID. + // + // JobId is a required field + JobId *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s DatastoreFilter) String() string { +func (s DescribeFHIRImportJobInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DatastoreFilter) GoString() string { +func (s DescribeFHIRImportJobInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *DatastoreFilter) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DatastoreFilter"} - if s.DatastoreName != nil && len(*s.DatastoreName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DatastoreName", 1)) +func (s *DescribeFHIRImportJobInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeFHIRImportJobInput"} + if s.DatastoreId == nil { + invalidParams.Add(request.NewErrParamRequired("DatastoreId")) + } + if s.DatastoreId != nil && len(*s.DatastoreId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DatastoreId", 1)) + } + if s.JobId == nil { + invalidParams.Add(request.NewErrParamRequired("JobId")) + } + if s.JobId != nil && len(*s.JobId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("JobId", 1)) } if invalidParams.Len() > 0 { @@ -1088,412 +2098,387 @@ func (s *DatastoreFilter) Validate() error { return nil } -// SetCreatedAfter sets the CreatedAfter field's value. -func (s *DatastoreFilter) SetCreatedAfter(v time.Time) *DatastoreFilter { - s.CreatedAfter = &v +// SetDatastoreId sets the DatastoreId field's value. +func (s *DescribeFHIRImportJobInput) SetDatastoreId(v string) *DescribeFHIRImportJobInput { + s.DatastoreId = &v return s } -// SetCreatedBefore sets the CreatedBefore field's value. -func (s *DatastoreFilter) SetCreatedBefore(v time.Time) *DatastoreFilter { - s.CreatedBefore = &v +// SetJobId sets the JobId field's value. +func (s *DescribeFHIRImportJobInput) SetJobId(v string) *DescribeFHIRImportJobInput { + s.JobId = &v return s } -// SetDatastoreName sets the DatastoreName field's value. -func (s *DatastoreFilter) SetDatastoreName(v string) *DatastoreFilter { - s.DatastoreName = &v - return s +type DescribeFHIRImportJobOutput struct { + _ struct{} `type:"structure"` + + // The properties of the Import job request, including the ID, ARN, name, and + // the status of the job. 
+ // + // ImportJobProperties is a required field + ImportJobProperties *ImportJobProperties `type:"structure" required:"true"` } -// SetDatastoreStatus sets the DatastoreStatus field's value. -func (s *DatastoreFilter) SetDatastoreStatus(v string) *DatastoreFilter { - s.DatastoreStatus = &v +// String returns the string representation +func (s DescribeFHIRImportJobOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeFHIRImportJobOutput) GoString() string { + return s.String() +} + +// SetImportJobProperties sets the ImportJobProperties field's value. +func (s *DescribeFHIRImportJobOutput) SetImportJobProperties(v *ImportJobProperties) *DescribeFHIRImportJobOutput { + s.ImportJobProperties = v return s } -// Displays the properties of the Data Store, including the ID, Arn, name, and -// the status of the Data Store. -type DatastoreProperties struct { +// The properties of a FHIR export job, including the ID, ARN, name, and the +// status of the job. +type ExportJobProperties struct { _ struct{} `type:"structure"` - // The time that a Data Store was created. - CreatedAt *time.Time `type:"timestamp"` + // The Amazon Resource Name used during the initiation of the job. + DataAccessRoleArn *string `min:"20" type:"string"` - // The Amazon Resource Name used in the creation of the Data Store. + // The AWS generated ID for the Data Store from which files are being exported + // for an export job. // - // DatastoreArn is a required field - DatastoreArn *string `type:"string" required:"true"` + // DatastoreId is a required field + DatastoreId *string `min:"1" type:"string" required:"true"` - // The AWS endpoint for the Data Store. Each Data Store will have it's own endpoint - // with Data Store ID in the endpoint URL. - // - // DatastoreEndpoint is a required field - DatastoreEndpoint *string `type:"string" required:"true"` + // The time an export job completed. 
+ EndTime *time.Time `type:"timestamp"` - // The AWS-generated ID number for the Data Store. + // The AWS generated ID for an export job. // - // DatastoreId is a required field - DatastoreId *string `min:"1" type:"string" required:"true"` + // JobId is a required field + JobId *string `min:"1" type:"string" required:"true"` - // The user-generated name for the Data Store. - DatastoreName *string `min:"1" type:"string"` + // The user generated name for an export job. + JobName *string `min:"1" type:"string"` - // The status of the Data Store. Possible statuses are 'CREATING', 'ACTIVE', - // 'DELETING', or 'DELETED'. + // The status of a FHIR export job. Possible statuses are SUBMITTED, IN_PROGRESS, + // COMPLETED, or FAILED. // - // DatastoreStatus is a required field - DatastoreStatus *string `type:"string" required:"true" enum:"DatastoreStatus"` + // JobStatus is a required field + JobStatus *string `type:"string" required:"true" enum:"JobStatus"` - // The FHIR version. Only R4 version data is supported. + // An explanation of any errors that may have occurred during the export job. + Message *string `min:"1" type:"string"` + + // The output data configuration that was supplied when the export job was created. // - // DatastoreTypeVersion is a required field - DatastoreTypeVersion *string `type:"string" required:"true" enum:"FHIRVersion"` + // OutputDataConfig is a required field + OutputDataConfig *OutputDataConfig `type:"structure" required:"true"` - // The preloaded data configuration for the Data Store. Only data preloaded - // from Synthea is supported. - PreloadDataConfig *PreloadDataConfig `type:"structure"` + // The time an export job was initiated. 
+ // + // SubmitTime is a required field + SubmitTime *time.Time `type:"timestamp" required:"true"` } // String returns the string representation -func (s DatastoreProperties) String() string { +func (s ExportJobProperties) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DatastoreProperties) GoString() string { +func (s ExportJobProperties) GoString() string { return s.String() } -// SetCreatedAt sets the CreatedAt field's value. -func (s *DatastoreProperties) SetCreatedAt(v time.Time) *DatastoreProperties { - s.CreatedAt = &v - return s -} - -// SetDatastoreArn sets the DatastoreArn field's value. -func (s *DatastoreProperties) SetDatastoreArn(v string) *DatastoreProperties { - s.DatastoreArn = &v - return s -} - -// SetDatastoreEndpoint sets the DatastoreEndpoint field's value. -func (s *DatastoreProperties) SetDatastoreEndpoint(v string) *DatastoreProperties { - s.DatastoreEndpoint = &v +// SetDataAccessRoleArn sets the DataAccessRoleArn field's value. +func (s *ExportJobProperties) SetDataAccessRoleArn(v string) *ExportJobProperties { + s.DataAccessRoleArn = &v return s } // SetDatastoreId sets the DatastoreId field's value. -func (s *DatastoreProperties) SetDatastoreId(v string) *DatastoreProperties { +func (s *ExportJobProperties) SetDatastoreId(v string) *ExportJobProperties { s.DatastoreId = &v return s } -// SetDatastoreName sets the DatastoreName field's value. -func (s *DatastoreProperties) SetDatastoreName(v string) *DatastoreProperties { - s.DatastoreName = &v +// SetEndTime sets the EndTime field's value. +func (s *ExportJobProperties) SetEndTime(v time.Time) *ExportJobProperties { + s.EndTime = &v return s } -// SetDatastoreStatus sets the DatastoreStatus field's value. -func (s *DatastoreProperties) SetDatastoreStatus(v string) *DatastoreProperties { - s.DatastoreStatus = &v +// SetJobId sets the JobId field's value. 
+func (s *ExportJobProperties) SetJobId(v string) *ExportJobProperties { + s.JobId = &v return s } -// SetDatastoreTypeVersion sets the DatastoreTypeVersion field's value. -func (s *DatastoreProperties) SetDatastoreTypeVersion(v string) *DatastoreProperties { - s.DatastoreTypeVersion = &v +// SetJobName sets the JobName field's value. +func (s *ExportJobProperties) SetJobName(v string) *ExportJobProperties { + s.JobName = &v return s } -// SetPreloadDataConfig sets the PreloadDataConfig field's value. -func (s *DatastoreProperties) SetPreloadDataConfig(v *PreloadDataConfig) *DatastoreProperties { - s.PreloadDataConfig = v +// SetJobStatus sets the JobStatus field's value. +func (s *ExportJobProperties) SetJobStatus(v string) *ExportJobProperties { + s.JobStatus = &v return s } -type DeleteFHIRDatastoreInput struct { - _ struct{} `type:"structure"` - - // The AWS-generated ID for the Data Store to be deleted. - DatastoreId *string `min:"1" type:"string"` +// SetMessage sets the Message field's value. +func (s *ExportJobProperties) SetMessage(v string) *ExportJobProperties { + s.Message = &v + return s } -// String returns the string representation -func (s DeleteFHIRDatastoreInput) String() string { - return awsutil.Prettify(s) +// SetOutputDataConfig sets the OutputDataConfig field's value. +func (s *ExportJobProperties) SetOutputDataConfig(v *OutputDataConfig) *ExportJobProperties { + s.OutputDataConfig = v + return s } -// GoString returns the string representation -func (s DeleteFHIRDatastoreInput) GoString() string { - return s.String() +// SetSubmitTime sets the SubmitTime field's value. +func (s *ExportJobProperties) SetSubmitTime(v time.Time) *ExportJobProperties { + s.SubmitTime = &v + return s } -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *DeleteFHIRDatastoreInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteFHIRDatastoreInput"} - if s.DatastoreId != nil && len(*s.DatastoreId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DatastoreId", 1)) - } +// Displays the properties of the import job, including the ID, Arn, Name, and +// the status of the Data Store. +type ImportJobProperties struct { + _ struct{} `type:"structure"` - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} + // The Amazon Resource Name (ARN) that gives Amazon HealthLake access to your + // input data. + DataAccessRoleArn *string `min:"20" type:"string"` -// SetDatastoreId sets the DatastoreId field's value. -func (s *DeleteFHIRDatastoreInput) SetDatastoreId(v string) *DeleteFHIRDatastoreInput { - s.DatastoreId = &v - return s -} + // The datastore id used when the Import job was created. + // + // DatastoreId is a required field + DatastoreId *string `min:"1" type:"string" required:"true"` -type DeleteFHIRDatastoreOutput struct { - _ struct{} `type:"structure"` + // The time that the Import job was completed. + EndTime *time.Time `type:"timestamp"` - // The Amazon Resource Name (ARN) that gives Amazon HealthLake access permission. + // The input data configuration that was supplied when the Import job was created. // - // DatastoreArn is a required field - DatastoreArn *string `type:"string" required:"true"` + // InputDataConfig is a required field + InputDataConfig *InputDataConfig `type:"structure" required:"true"` - // The AWS endpoint for the Data Store the user has requested to be deleted. + // The AWS-generated id number for the Import job. // - // DatastoreEndpoint is a required field - DatastoreEndpoint *string `min:"1" type:"string" required:"true"` + // JobId is a required field + JobId *string `min:"1" type:"string" required:"true"` - // The AWS-generated ID for the Data Store to be deleted. + // The user-generated name for an Import job. 
+ JobName *string `min:"1" type:"string"` + + // The output data configuration that was supplied when the export job was created. + JobOutputDataConfig *OutputDataConfig `type:"structure"` + + // The job status for an Import job. Possible statuses are SUBMITTED, IN_PROGRESS, + // COMPLETED, FAILED. // - // DatastoreId is a required field - DatastoreId *string `min:"1" type:"string" required:"true"` + // JobStatus is a required field + JobStatus *string `type:"string" required:"true" enum:"JobStatus"` - // The status of the Data Store that the user has requested to be deleted. + // An explanation of any errors that may have occurred during the FHIR import + // job. + Message *string `min:"1" type:"string"` + + // The time that the Import job was submitted for processing. // - // DatastoreStatus is a required field - DatastoreStatus *string `type:"string" required:"true" enum:"DatastoreStatus"` + // SubmitTime is a required field + SubmitTime *time.Time `type:"timestamp" required:"true"` } // String returns the string representation -func (s DeleteFHIRDatastoreOutput) String() string { +func (s ImportJobProperties) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteFHIRDatastoreOutput) GoString() string { +func (s ImportJobProperties) GoString() string { return s.String() } -// SetDatastoreArn sets the DatastoreArn field's value. -func (s *DeleteFHIRDatastoreOutput) SetDatastoreArn(v string) *DeleteFHIRDatastoreOutput { - s.DatastoreArn = &v - return s -} - -// SetDatastoreEndpoint sets the DatastoreEndpoint field's value. -func (s *DeleteFHIRDatastoreOutput) SetDatastoreEndpoint(v string) *DeleteFHIRDatastoreOutput { - s.DatastoreEndpoint = &v +// SetDataAccessRoleArn sets the DataAccessRoleArn field's value. +func (s *ImportJobProperties) SetDataAccessRoleArn(v string) *ImportJobProperties { + s.DataAccessRoleArn = &v return s } // SetDatastoreId sets the DatastoreId field's value. 
-func (s *DeleteFHIRDatastoreOutput) SetDatastoreId(v string) *DeleteFHIRDatastoreOutput { +func (s *ImportJobProperties) SetDatastoreId(v string) *ImportJobProperties { s.DatastoreId = &v return s } -// SetDatastoreStatus sets the DatastoreStatus field's value. -func (s *DeleteFHIRDatastoreOutput) SetDatastoreStatus(v string) *DeleteFHIRDatastoreOutput { - s.DatastoreStatus = &v +// SetEndTime sets the EndTime field's value. +func (s *ImportJobProperties) SetEndTime(v time.Time) *ImportJobProperties { + s.EndTime = &v return s } -type DescribeFHIRDatastoreInput struct { - _ struct{} `type:"structure"` +// SetInputDataConfig sets the InputDataConfig field's value. +func (s *ImportJobProperties) SetInputDataConfig(v *InputDataConfig) *ImportJobProperties { + s.InputDataConfig = v + return s +} - // The AWS-generated Data Store id. This is part of the ‘CreateFHIRDatastore’ - // output. - DatastoreId *string `min:"1" type:"string"` +// SetJobId sets the JobId field's value. +func (s *ImportJobProperties) SetJobId(v string) *ImportJobProperties { + s.JobId = &v + return s } -// String returns the string representation -func (s DescribeFHIRDatastoreInput) String() string { - return awsutil.Prettify(s) +// SetJobName sets the JobName field's value. +func (s *ImportJobProperties) SetJobName(v string) *ImportJobProperties { + s.JobName = &v + return s } -// GoString returns the string representation -func (s DescribeFHIRDatastoreInput) GoString() string { - return s.String() +// SetJobOutputDataConfig sets the JobOutputDataConfig field's value. +func (s *ImportJobProperties) SetJobOutputDataConfig(v *OutputDataConfig) *ImportJobProperties { + s.JobOutputDataConfig = v + return s } -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *DescribeFHIRDatastoreInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeFHIRDatastoreInput"} - if s.DatastoreId != nil && len(*s.DatastoreId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DatastoreId", 1)) - } +// SetJobStatus sets the JobStatus field's value. +func (s *ImportJobProperties) SetJobStatus(v string) *ImportJobProperties { + s.JobStatus = &v + return s +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetMessage sets the Message field's value. +func (s *ImportJobProperties) SetMessage(v string) *ImportJobProperties { + s.Message = &v + return s } -// SetDatastoreId sets the DatastoreId field's value. -func (s *DescribeFHIRDatastoreInput) SetDatastoreId(v string) *DescribeFHIRDatastoreInput { - s.DatastoreId = &v +// SetSubmitTime sets the SubmitTime field's value. +func (s *ImportJobProperties) SetSubmitTime(v time.Time) *ImportJobProperties { + s.SubmitTime = &v return s } -type DescribeFHIRDatastoreOutput struct { +// The input properties for an import job. +type InputDataConfig struct { _ struct{} `type:"structure"` - // All properties associated with a Data Store, including the Data Store ID, - // Data Store ARN, Data Store name, Data Store status, created at, Data Store - // type version, and Data Store endpoint. - // - // DatastoreProperties is a required field - DatastoreProperties *DatastoreProperties `type:"structure" required:"true"` + // The S3Uri is the user specified S3 location of the FHIR data to be imported + // into Amazon HealthLake. 
+ S3Uri *string `type:"string"` } // String returns the string representation -func (s DescribeFHIRDatastoreOutput) String() string { +func (s InputDataConfig) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeFHIRDatastoreOutput) GoString() string { +func (s InputDataConfig) GoString() string { return s.String() } -// SetDatastoreProperties sets the DatastoreProperties field's value. -func (s *DescribeFHIRDatastoreOutput) SetDatastoreProperties(v *DatastoreProperties) *DescribeFHIRDatastoreOutput { - s.DatastoreProperties = v +// SetS3Uri sets the S3Uri field's value. +func (s *InputDataConfig) SetS3Uri(v string) *InputDataConfig { + s.S3Uri = &v return s } -type DescribeFHIRExportJobInput struct { - _ struct{} `type:"structure"` - - // The AWS generated ID for the Data Store from which files are being exported - // from for an export job. - // - // DatastoreId is a required field - DatastoreId *string `min:"1" type:"string" required:"true"` +// Unknown error occurs in the service. +type InternalServerException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - // The AWS generated ID for an export job. - // - // JobId is a required field - JobId *string `min:"1" type:"string" required:"true"` + Message_ *string `locationName:"Message" type:"string"` } // String returns the string representation -func (s DescribeFHIRExportJobInput) String() string { +func (s InternalServerException) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeFHIRExportJobInput) GoString() string { +func (s InternalServerException) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *DescribeFHIRExportJobInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeFHIRExportJobInput"} - if s.DatastoreId == nil { - invalidParams.Add(request.NewErrParamRequired("DatastoreId")) - } - if s.DatastoreId != nil && len(*s.DatastoreId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DatastoreId", 1)) - } - if s.JobId == nil { - invalidParams.Add(request.NewErrParamRequired("JobId")) - } - if s.JobId != nil && len(*s.JobId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("JobId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams +func newErrorInternalServerException(v protocol.ResponseMetadata) error { + return &InternalServerException{ + RespMetadata: v, } - return nil } -// SetDatastoreId sets the DatastoreId field's value. -func (s *DescribeFHIRExportJobInput) SetDatastoreId(v string) *DescribeFHIRExportJobInput { - s.DatastoreId = &v - return s +// Code returns the exception type name. +func (s *InternalServerException) Code() string { + return "InternalServerException" } -// SetJobId sets the JobId field's value. -func (s *DescribeFHIRExportJobInput) SetJobId(v string) *DescribeFHIRExportJobInput { - s.JobId = &v - return s +// Message returns the exception's message. +func (s *InternalServerException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" } -type DescribeFHIRExportJobOutput struct { - _ struct{} `type:"structure"` - - // Displays the properties of the export job, including the ID, Arn, Name, and - // the status of the job. - // - // ExportJobProperties is a required field - ExportJobProperties *ExportJobProperties `type:"structure" required:"true"` +// OrigErr always returns nil, satisfies awserr.Error interface. 
+func (s *InternalServerException) OrigErr() error { + return nil } -// String returns the string representation -func (s DescribeFHIRExportJobOutput) String() string { - return awsutil.Prettify(s) +func (s *InternalServerException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } -// GoString returns the string representation -func (s DescribeFHIRExportJobOutput) GoString() string { - return s.String() +// Status code returns the HTTP status code for the request's response error. +func (s *InternalServerException) StatusCode() int { + return s.RespMetadata.StatusCode } -// SetExportJobProperties sets the ExportJobProperties field's value. -func (s *DescribeFHIRExportJobOutput) SetExportJobProperties(v *ExportJobProperties) *DescribeFHIRExportJobOutput { - s.ExportJobProperties = v - return s +// RequestID returns the service's response RequestID for request. +func (s *InternalServerException) RequestID() string { + return s.RespMetadata.RequestID } -type DescribeFHIRImportJobInput struct { +// The customer-managed-key(CMK) used when creating a Data Store. If a customer +// owned key is not specified, an AWS owned key will be used for encryption. +type KmsEncryptionConfig struct { _ struct{} `type:"structure"` - // The AWS-generated ID of the Data Store. + // The type of customer-managed-key(CMK) used for encyrption. The two types + // of supported CMKs are customer owned CMKs and AWS owned CMKs. // - // DatastoreId is a required field - DatastoreId *string `min:"1" type:"string" required:"true"` + // CmkType is a required field + CmkType *string `type:"string" required:"true" enum:"CmkType"` - // The AWS-generated job ID. - // - // JobId is a required field - JobId *string `min:"1" type:"string" required:"true"` + // The KMS encryption key id/alias used to encrypt the Data Store contents at + // rest. 
+ KmsKeyId *string `min:"1" type:"string"` } // String returns the string representation -func (s DescribeFHIRImportJobInput) String() string { +func (s KmsEncryptionConfig) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeFHIRImportJobInput) GoString() string { +func (s KmsEncryptionConfig) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DescribeFHIRImportJobInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeFHIRImportJobInput"} - if s.DatastoreId == nil { - invalidParams.Add(request.NewErrParamRequired("DatastoreId")) - } - if s.DatastoreId != nil && len(*s.DatastoreId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DatastoreId", 1)) - } - if s.JobId == nil { - invalidParams.Add(request.NewErrParamRequired("JobId")) +func (s *KmsEncryptionConfig) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "KmsEncryptionConfig"} + if s.CmkType == nil { + invalidParams.Add(request.NewErrParamRequired("CmkType")) } - if s.JobId != nil && len(*s.JobId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("JobId", 1)) + if s.KmsKeyId != nil && len(*s.KmsKeyId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("KmsKeyId", 1)) } if invalidParams.Len() > 0 { @@ -1502,435 +2487,462 @@ func (s *DescribeFHIRImportJobInput) Validate() error { return nil } -// SetDatastoreId sets the DatastoreId field's value. -func (s *DescribeFHIRImportJobInput) SetDatastoreId(v string) *DescribeFHIRImportJobInput { - s.DatastoreId = &v - return s -} - -// SetJobId sets the JobId field's value. -func (s *DescribeFHIRImportJobInput) SetJobId(v string) *DescribeFHIRImportJobInput { - s.JobId = &v +// SetCmkType sets the CmkType field's value. 
+func (s *KmsEncryptionConfig) SetCmkType(v string) *KmsEncryptionConfig { + s.CmkType = &v return s } -type DescribeFHIRImportJobOutput struct { - _ struct{} `type:"structure"` - - // The properties of the Import job request, including the ID, ARN, name, and - // the status of the job. - // - // ImportJobProperties is a required field - ImportJobProperties *ImportJobProperties `type:"structure" required:"true"` -} - -// String returns the string representation -func (s DescribeFHIRImportJobOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeFHIRImportJobOutput) GoString() string { - return s.String() -} - -// SetImportJobProperties sets the ImportJobProperties field's value. -func (s *DescribeFHIRImportJobOutput) SetImportJobProperties(v *ImportJobProperties) *DescribeFHIRImportJobOutput { - s.ImportJobProperties = v +// SetKmsKeyId sets the KmsKeyId field's value. +func (s *KmsEncryptionConfig) SetKmsKeyId(v string) *KmsEncryptionConfig { + s.KmsKeyId = &v return s } -// The properties of a FHIR export job, including the ID, ARN, name, and the -// status of the job. -type ExportJobProperties struct { +type ListFHIRDatastoresInput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name used during the initiation of the job. - DataAccessRoleArn *string `min:"20" type:"string"` - - // The AWS generated ID for the Data Store from which files are being exported - // for an export job. - // - // DatastoreId is a required field - DatastoreId *string `min:"1" type:"string" required:"true"` - - // The time an export job completed. - EndTime *time.Time `type:"timestamp"` - - // The AWS generated ID for an export job. - // - // JobId is a required field - JobId *string `min:"1" type:"string" required:"true"` - - // The user generated name for an export job. - JobName *string `min:"1" type:"string"` - - // The status of a FHIR export job. 
Possible statuses are SUBMITTED, IN_PROGRESS, - // COMPLETED, or FAILED. - // - // JobStatus is a required field - JobStatus *string `type:"string" required:"true" enum:"JobStatus"` - - // An explanation of any errors that may have occurred during the export job. - Message *string `min:"1" type:"string"` + // Lists all filters associated with a FHIR Data Store request. + Filter *DatastoreFilter `type:"structure"` - // The output data configuration that was supplied when the export job was created. - // - // OutputDataConfig is a required field - OutputDataConfig *OutputDataConfig `type:"structure" required:"true"` + // The maximum number of Data Stores returned in a single page of a ListFHIRDatastoresRequest + // call. + MaxResults *int64 `min:"1" type:"integer"` - // The time an export job was initiated. - // - // SubmitTime is a required field - SubmitTime *time.Time `type:"timestamp" required:"true"` + // Fetches the next page of Data Stores when results are paginated. + NextToken *string `type:"string"` } // String returns the string representation -func (s ExportJobProperties) String() string { +func (s ListFHIRDatastoresInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ExportJobProperties) GoString() string { +func (s ListFHIRDatastoresInput) GoString() string { return s.String() } -// SetDataAccessRoleArn sets the DataAccessRoleArn field's value. -func (s *ExportJobProperties) SetDataAccessRoleArn(v string) *ExportJobProperties { - s.DataAccessRoleArn = &v - return s +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ListFHIRDatastoresInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListFHIRDatastoresInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.Filter != nil { + if err := s.Filter.Validate(); err != nil { + invalidParams.AddNested("Filter", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetDatastoreId sets the DatastoreId field's value. -func (s *ExportJobProperties) SetDatastoreId(v string) *ExportJobProperties { - s.DatastoreId = &v +// SetFilter sets the Filter field's value. +func (s *ListFHIRDatastoresInput) SetFilter(v *DatastoreFilter) *ListFHIRDatastoresInput { + s.Filter = v return s } -// SetEndTime sets the EndTime field's value. -func (s *ExportJobProperties) SetEndTime(v time.Time) *ExportJobProperties { - s.EndTime = &v +// SetMaxResults sets the MaxResults field's value. +func (s *ListFHIRDatastoresInput) SetMaxResults(v int64) *ListFHIRDatastoresInput { + s.MaxResults = &v return s } -// SetJobId sets the JobId field's value. -func (s *ExportJobProperties) SetJobId(v string) *ExportJobProperties { - s.JobId = &v +// SetNextToken sets the NextToken field's value. +func (s *ListFHIRDatastoresInput) SetNextToken(v string) *ListFHIRDatastoresInput { + s.NextToken = &v return s } -// SetJobName sets the JobName field's value. -func (s *ExportJobProperties) SetJobName(v string) *ExportJobProperties { - s.JobName = &v - return s +type ListFHIRDatastoresOutput struct { + _ struct{} `type:"structure"` + + // All properties associated with the listed Data Stores. + // + // DatastorePropertiesList is a required field + DatastorePropertiesList []*DatastoreProperties `type:"list" required:"true"` + + // Pagination token that can be used to retrieve the next page of results. + NextToken *string `type:"string"` } -// SetJobStatus sets the JobStatus field's value. 
-func (s *ExportJobProperties) SetJobStatus(v string) *ExportJobProperties { - s.JobStatus = &v - return s +// String returns the string representation +func (s ListFHIRDatastoresOutput) String() string { + return awsutil.Prettify(s) } -// SetMessage sets the Message field's value. -func (s *ExportJobProperties) SetMessage(v string) *ExportJobProperties { - s.Message = &v - return s +// GoString returns the string representation +func (s ListFHIRDatastoresOutput) GoString() string { + return s.String() } -// SetOutputDataConfig sets the OutputDataConfig field's value. -func (s *ExportJobProperties) SetOutputDataConfig(v *OutputDataConfig) *ExportJobProperties { - s.OutputDataConfig = v +// SetDatastorePropertiesList sets the DatastorePropertiesList field's value. +func (s *ListFHIRDatastoresOutput) SetDatastorePropertiesList(v []*DatastoreProperties) *ListFHIRDatastoresOutput { + s.DatastorePropertiesList = v return s } -// SetSubmitTime sets the SubmitTime field's value. -func (s *ExportJobProperties) SetSubmitTime(v time.Time) *ExportJobProperties { - s.SubmitTime = &v +// SetNextToken sets the NextToken field's value. +func (s *ListFHIRDatastoresOutput) SetNextToken(v string) *ListFHIRDatastoresOutput { + s.NextToken = &v return s } -// Displays the properties of the import job, including the ID, Arn, Name, and -// the status of the Data Store. -type ImportJobProperties struct { +type ListFHIRExportJobsInput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) that gives Amazon HealthLake access to your - // input data. - DataAccessRoleArn *string `min:"20" type:"string"` - - // The datastore id used when the Import job was created. + // This parameter limits the response to the export job with the specified Data + // Store ID. // // DatastoreId is a required field DatastoreId *string `min:"1" type:"string" required:"true"` - // The time that the Import job was completed. 
- EndTime *time.Time `type:"timestamp"` - - // The input data configuration that was supplied when the Import job was created. - // - // InputDataConfig is a required field - InputDataConfig *InputDataConfig `type:"structure" required:"true"` + // This parameter limits the response to the export job with the specified job + // name. + JobName *string `min:"1" type:"string"` - // The AWS-generated id number for the Import job. - // - // JobId is a required field - JobId *string `min:"1" type:"string" required:"true"` + // This parameter limits the response to the export jobs with the specified + // job status. + JobStatus *string `type:"string" enum:"JobStatus"` - // The user-generated name for an Import job. - JobName *string `min:"1" type:"string"` + // This parameter limits the number of results returned for a ListFHIRExportJobs + // to a maximum quantity specified by the user. + MaxResults *int64 `min:"1" type:"integer"` - // The job status for an Import job. Possible statuses are SUBMITTED, IN_PROGRESS, - // COMPLETED, FAILED. - // - // JobStatus is a required field - JobStatus *string `type:"string" required:"true" enum:"JobStatus"` + // A pagination token used to identify the next page of results to return for + // a ListFHIRExportJobs query. + NextToken *string `type:"string"` - // An explanation of any errors that may have occurred during the FHIR import - // job. - Message *string `min:"1" type:"string"` + // This parameter limits the response to FHIR export jobs submitted after a + // user specified date. + SubmittedAfter *time.Time `type:"timestamp"` - // The time that the Import job was submitted for processing. - // - // SubmitTime is a required field - SubmitTime *time.Time `type:"timestamp" required:"true"` + // This parameter limits the response to FHIR export jobs submitted before a + // user specified date. 
+ SubmittedBefore *time.Time `type:"timestamp"` } // String returns the string representation -func (s ImportJobProperties) String() string { +func (s ListFHIRExportJobsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ImportJobProperties) GoString() string { +func (s ListFHIRExportJobsInput) GoString() string { return s.String() } -// SetDataAccessRoleArn sets the DataAccessRoleArn field's value. -func (s *ImportJobProperties) SetDataAccessRoleArn(v string) *ImportJobProperties { - s.DataAccessRoleArn = &v - return s +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListFHIRExportJobsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListFHIRExportJobsInput"} + if s.DatastoreId == nil { + invalidParams.Add(request.NewErrParamRequired("DatastoreId")) + } + if s.DatastoreId != nil && len(*s.DatastoreId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DatastoreId", 1)) + } + if s.JobName != nil && len(*s.JobName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("JobName", 1)) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } // SetDatastoreId sets the DatastoreId field's value. -func (s *ImportJobProperties) SetDatastoreId(v string) *ImportJobProperties { +func (s *ListFHIRExportJobsInput) SetDatastoreId(v string) *ListFHIRExportJobsInput { s.DatastoreId = &v return s } -// SetEndTime sets the EndTime field's value. -func (s *ImportJobProperties) SetEndTime(v time.Time) *ImportJobProperties { - s.EndTime = &v - return s -} - -// SetInputDataConfig sets the InputDataConfig field's value. -func (s *ImportJobProperties) SetInputDataConfig(v *InputDataConfig) *ImportJobProperties { - s.InputDataConfig = v +// SetJobName sets the JobName field's value. 
+func (s *ListFHIRExportJobsInput) SetJobName(v string) *ListFHIRExportJobsInput { + s.JobName = &v return s } -// SetJobId sets the JobId field's value. -func (s *ImportJobProperties) SetJobId(v string) *ImportJobProperties { - s.JobId = &v +// SetJobStatus sets the JobStatus field's value. +func (s *ListFHIRExportJobsInput) SetJobStatus(v string) *ListFHIRExportJobsInput { + s.JobStatus = &v return s } -// SetJobName sets the JobName field's value. -func (s *ImportJobProperties) SetJobName(v string) *ImportJobProperties { - s.JobName = &v +// SetMaxResults sets the MaxResults field's value. +func (s *ListFHIRExportJobsInput) SetMaxResults(v int64) *ListFHIRExportJobsInput { + s.MaxResults = &v return s } -// SetJobStatus sets the JobStatus field's value. -func (s *ImportJobProperties) SetJobStatus(v string) *ImportJobProperties { - s.JobStatus = &v +// SetNextToken sets the NextToken field's value. +func (s *ListFHIRExportJobsInput) SetNextToken(v string) *ListFHIRExportJobsInput { + s.NextToken = &v return s } -// SetMessage sets the Message field's value. -func (s *ImportJobProperties) SetMessage(v string) *ImportJobProperties { - s.Message = &v +// SetSubmittedAfter sets the SubmittedAfter field's value. +func (s *ListFHIRExportJobsInput) SetSubmittedAfter(v time.Time) *ListFHIRExportJobsInput { + s.SubmittedAfter = &v return s } -// SetSubmitTime sets the SubmitTime field's value. -func (s *ImportJobProperties) SetSubmitTime(v time.Time) *ImportJobProperties { - s.SubmitTime = &v +// SetSubmittedBefore sets the SubmittedBefore field's value. +func (s *ListFHIRExportJobsInput) SetSubmittedBefore(v time.Time) *ListFHIRExportJobsInput { + s.SubmittedBefore = &v return s } -// The input properties for an import job. -type InputDataConfig struct { +type ListFHIRExportJobsOutput struct { _ struct{} `type:"structure"` - // The S3Uri is the user specified S3 location of the FHIR data to be imported - // into Amazon HealthLake. 
- S3Uri *string `type:"string"` + // The properties of listed FHIR export jobs, including the ID, ARN, name, and + // the status of the job. + // + // ExportJobPropertiesList is a required field + ExportJobPropertiesList []*ExportJobProperties `type:"list" required:"true"` + + // A pagination token used to identify the next page of results to return for + // a ListFHIRExportJobs query. + NextToken *string `type:"string"` } // String returns the string representation -func (s InputDataConfig) String() string { +func (s ListFHIRExportJobsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s InputDataConfig) GoString() string { +func (s ListFHIRExportJobsOutput) GoString() string { return s.String() } -// SetS3Uri sets the S3Uri field's value. -func (s *InputDataConfig) SetS3Uri(v string) *InputDataConfig { - s.S3Uri = &v +// SetExportJobPropertiesList sets the ExportJobPropertiesList field's value. +func (s *ListFHIRExportJobsOutput) SetExportJobPropertiesList(v []*ExportJobProperties) *ListFHIRExportJobsOutput { + s.ExportJobPropertiesList = v return s } -// Unknown error occurs in the service. -type InternalServerException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` +// SetNextToken sets the NextToken field's value. +func (s *ListFHIRExportJobsOutput) SetNextToken(v string) *ListFHIRExportJobsOutput { + s.NextToken = &v + return s +} - Message_ *string `locationName:"Message" type:"string"` +type ListFHIRImportJobsInput struct { + _ struct{} `type:"structure"` + + // This parameter limits the response to the import job with the specified Data + // Store ID. + // + // DatastoreId is a required field + DatastoreId *string `min:"1" type:"string" required:"true"` + + // This parameter limits the response to the import job with the specified job + // name. 
+ JobName *string `min:"1" type:"string"` + + // This parameter limits the response to the import job with the specified job + // status. + JobStatus *string `type:"string" enum:"JobStatus"` + + // This parameter limits the number of results returned for a ListFHIRImportJobs + // to a maximum quantity specified by the user. + MaxResults *int64 `min:"1" type:"integer"` + + // A pagination token used to identify the next page of results to return for + // a ListFHIRImportJobs query. + NextToken *string `type:"string"` + + // This parameter limits the response to FHIR import jobs submitted after a + // user specified date. + SubmittedAfter *time.Time `type:"timestamp"` + + // This parameter limits the response to FHIR import jobs submitted before a + // user specified date. + SubmittedBefore *time.Time `type:"timestamp"` } // String returns the string representation -func (s InternalServerException) String() string { +func (s ListFHIRImportJobsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s InternalServerException) GoString() string { +func (s ListFHIRImportJobsInput) GoString() string { return s.String() } -func newErrorInternalServerException(v protocol.ResponseMetadata) error { - return &InternalServerException{ - RespMetadata: v, +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ListFHIRImportJobsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListFHIRImportJobsInput"} + if s.DatastoreId == nil { + invalidParams.Add(request.NewErrParamRequired("DatastoreId")) + } + if s.DatastoreId != nil && len(*s.DatastoreId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DatastoreId", 1)) + } + if s.JobName != nil && len(*s.JobName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("JobName", 1)) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams } + return nil } -// Code returns the exception type name. -func (s *InternalServerException) Code() string { - return "InternalServerException" +// SetDatastoreId sets the DatastoreId field's value. +func (s *ListFHIRImportJobsInput) SetDatastoreId(v string) *ListFHIRImportJobsInput { + s.DatastoreId = &v + return s } -// Message returns the exception's message. -func (s *InternalServerException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" +// SetJobName sets the JobName field's value. +func (s *ListFHIRImportJobsInput) SetJobName(v string) *ListFHIRImportJobsInput { + s.JobName = &v + return s } -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *InternalServerException) OrigErr() error { - return nil +// SetJobStatus sets the JobStatus field's value. +func (s *ListFHIRImportJobsInput) SetJobStatus(v string) *ListFHIRImportJobsInput { + s.JobStatus = &v + return s } -func (s *InternalServerException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +// SetMaxResults sets the MaxResults field's value. +func (s *ListFHIRImportJobsInput) SetMaxResults(v int64) *ListFHIRImportJobsInput { + s.MaxResults = &v + return s } -// Status code returns the HTTP status code for the request's response error. 
-func (s *InternalServerException) StatusCode() int { - return s.RespMetadata.StatusCode +// SetNextToken sets the NextToken field's value. +func (s *ListFHIRImportJobsInput) SetNextToken(v string) *ListFHIRImportJobsInput { + s.NextToken = &v + return s } -// RequestID returns the service's response RequestID for request. -func (s *InternalServerException) RequestID() string { - return s.RespMetadata.RequestID +// SetSubmittedAfter sets the SubmittedAfter field's value. +func (s *ListFHIRImportJobsInput) SetSubmittedAfter(v time.Time) *ListFHIRImportJobsInput { + s.SubmittedAfter = &v + return s } -type ListFHIRDatastoresInput struct { - _ struct{} `type:"structure"` +// SetSubmittedBefore sets the SubmittedBefore field's value. +func (s *ListFHIRImportJobsInput) SetSubmittedBefore(v time.Time) *ListFHIRImportJobsInput { + s.SubmittedBefore = &v + return s +} - // Lists all filters associated with a FHIR Data Store request. - Filter *DatastoreFilter `type:"structure"` +type ListFHIRImportJobsOutput struct { + _ struct{} `type:"structure"` - // The maximum number of Data Stores returned in a single page of a ListFHIRDatastoresRequest - // call. - MaxResults *int64 `min:"1" type:"integer"` + // The properties of a listed FHIR import jobs, including the ID, ARN, name, + // and the status of the job. + // + // ImportJobPropertiesList is a required field + ImportJobPropertiesList []*ImportJobProperties `type:"list" required:"true"` - // Fetches the next page of Data Stores when results are paginated. + // A pagination token used to identify the next page of results to return for + // a ListFHIRImportJobs query. 
NextToken *string `type:"string"` } // String returns the string representation -func (s ListFHIRDatastoresInput) String() string { +func (s ListFHIRImportJobsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListFHIRDatastoresInput) GoString() string { +func (s ListFHIRImportJobsOutput) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListFHIRDatastoresInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListFHIRDatastoresInput"} - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) - } - if s.Filter != nil { - if err := s.Filter.Validate(); err != nil { - invalidParams.AddNested("Filter", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetFilter sets the Filter field's value. -func (s *ListFHIRDatastoresInput) SetFilter(v *DatastoreFilter) *ListFHIRDatastoresInput { - s.Filter = v - return s -} - -// SetMaxResults sets the MaxResults field's value. -func (s *ListFHIRDatastoresInput) SetMaxResults(v int64) *ListFHIRDatastoresInput { - s.MaxResults = &v +// SetImportJobPropertiesList sets the ImportJobPropertiesList field's value. +func (s *ListFHIRImportJobsOutput) SetImportJobPropertiesList(v []*ImportJobProperties) *ListFHIRImportJobsOutput { + s.ImportJobPropertiesList = v return s } // SetNextToken sets the NextToken field's value. -func (s *ListFHIRDatastoresInput) SetNextToken(v string) *ListFHIRDatastoresInput { +func (s *ListFHIRImportJobsOutput) SetNextToken(v string) *ListFHIRImportJobsOutput { s.NextToken = &v return s } -type ListFHIRDatastoresOutput struct { +type ListTagsForResourceInput struct { _ struct{} `type:"structure"` - // All properties associated with the listed Data Stores. 
+ // The Amazon Resource Name(ARN) of the Data Store for which tags are being + // added. // - // DatastorePropertiesList is a required field - DatastorePropertiesList []*DatastoreProperties `type:"list" required:"true"` - - // Pagination token that can be used to retrieve the next page of results. - NextToken *string `type:"string"` + // ResourceARN is a required field + ResourceARN *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s ListFHIRDatastoresOutput) String() string { +func (s ListTagsForResourceInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListFHIRDatastoresOutput) GoString() string { +func (s ListTagsForResourceInput) GoString() string { return s.String() } -// SetDatastorePropertiesList sets the DatastorePropertiesList field's value. -func (s *ListFHIRDatastoresOutput) SetDatastorePropertiesList(v []*DatastoreProperties) *ListFHIRDatastoresOutput { - s.DatastorePropertiesList = v +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListTagsForResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListTagsForResourceInput"} + if s.ResourceARN == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceARN")) + } + if s.ResourceARN != nil && len(*s.ResourceARN) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceARN", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceARN sets the ResourceARN field's value. +func (s *ListTagsForResourceInput) SetResourceARN(v string) *ListTagsForResourceInput { + s.ResourceARN = &v return s } -// SetNextToken sets the NextToken field's value. -func (s *ListFHIRDatastoresOutput) SetNextToken(v string) *ListFHIRDatastoresOutput { - s.NextToken = &v +type ListTagsForResourceOutput struct { + _ struct{} `type:"structure"` + + // Returns a list of tags associated with a Data Store. 
+ Tags []*Tag `type:"list"` +} + +// String returns the string representation +func (s ListTagsForResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceOutput) GoString() string { + return s.String() +} + +// SetTags sets the Tags field's value. +func (s *ListTagsForResourceOutput) SetTags(v []*Tag) *ListTagsForResourceOutput { + s.Tags = v return s } @@ -1938,9 +2950,8 @@ func (s *ListFHIRDatastoresOutput) SetNextToken(v string) *ListFHIRDatastoresOut type OutputDataConfig struct { _ struct{} `type:"structure"` - // The S3Uri is the user specified S3 location to which data will be exported - // from a FHIR Data Store. - S3Uri *string `type:"string"` + // The output data configuration that was supplied when the export job was created. + S3Configuration *S3Configuration `type:"structure"` } // String returns the string representation @@ -1953,9 +2964,24 @@ func (s OutputDataConfig) GoString() string { return s.String() } -// SetS3Uri sets the S3Uri field's value. -func (s *OutputDataConfig) SetS3Uri(v string) *OutputDataConfig { - s.S3Uri = &v +// Validate inspects the fields of the type to determine if they are valid. +func (s *OutputDataConfig) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "OutputDataConfig"} + if s.S3Configuration != nil { + if err := s.S3Configuration.Validate(); err != nil { + invalidParams.AddNested("S3Configuration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetS3Configuration sets the S3Configuration field's value. +func (s *OutputDataConfig) SetS3Configuration(v *S3Configuration) *OutputDataConfig { + s.S3Configuration = v return s } @@ -2055,6 +3081,109 @@ func (s *ResourceNotFoundException) RequestID() string { return s.RespMetadata.RequestID } +// The configuration of the S3 bucket for either an import or export job. 
This +// includes assigning permissions for access. +type S3Configuration struct { + _ struct{} `type:"structure"` + + // The KMS key ID used to access the S3 bucket. + // + // KmsKeyId is a required field + KmsKeyId *string `min:"1" type:"string" required:"true"` + + // The S3Uri is the user specified S3 location of the FHIR data to be imported + // into Amazon HealthLake. + // + // S3Uri is a required field + S3Uri *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s S3Configuration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s S3Configuration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *S3Configuration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "S3Configuration"} + if s.KmsKeyId == nil { + invalidParams.Add(request.NewErrParamRequired("KmsKeyId")) + } + if s.KmsKeyId != nil && len(*s.KmsKeyId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("KmsKeyId", 1)) + } + if s.S3Uri == nil { + invalidParams.Add(request.NewErrParamRequired("S3Uri")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKmsKeyId sets the KmsKeyId field's value. +func (s *S3Configuration) SetKmsKeyId(v string) *S3Configuration { + s.KmsKeyId = &v + return s +} + +// SetS3Uri sets the S3Uri field's value. +func (s *S3Configuration) SetS3Uri(v string) *S3Configuration { + s.S3Uri = &v + return s +} + +// The server-side encryption key configuration for a customer provided encryption +// key. +type SseConfiguration struct { + _ struct{} `type:"structure"` + + // The KMS encryption configuration used to provide details for data encryption. 
+ // + // KmsEncryptionConfig is a required field + KmsEncryptionConfig *KmsEncryptionConfig `type:"structure" required:"true"` +} + +// String returns the string representation +func (s SseConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SseConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SseConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SseConfiguration"} + if s.KmsEncryptionConfig == nil { + invalidParams.Add(request.NewErrParamRequired("KmsEncryptionConfig")) + } + if s.KmsEncryptionConfig != nil { + if err := s.KmsEncryptionConfig.Validate(); err != nil { + invalidParams.AddNested("KmsEncryptionConfig", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKmsEncryptionConfig sets the KmsEncryptionConfig field's value. +func (s *SseConfiguration) SetKmsEncryptionConfig(v *KmsEncryptionConfig) *SseConfiguration { + s.KmsEncryptionConfig = v + return s +} + type StartFHIRExportJobInput struct { _ struct{} `type:"structure"` @@ -2115,6 +3244,11 @@ func (s *StartFHIRExportJobInput) Validate() error { if s.OutputDataConfig == nil { invalidParams.Add(request.NewErrParamRequired("OutputDataConfig")) } + if s.OutputDataConfig != nil { + if err := s.OutputDataConfig.Validate(); err != nil { + invalidParams.AddNested("OutputDataConfig", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -2222,6 +3356,11 @@ type StartFHIRImportJobInput struct { // The name of the FHIR Import job in the StartFHIRImport job request. JobName *string `min:"1" type:"string"` + + // The output data configuration that was supplied when the export job was created. 
+ // + // JobOutputDataConfig is a required field + JobOutputDataConfig *OutputDataConfig `type:"structure" required:"true"` } // String returns the string representation @@ -2258,6 +3397,14 @@ func (s *StartFHIRImportJobInput) Validate() error { if s.JobName != nil && len(*s.JobName) < 1 { invalidParams.Add(request.NewErrParamMinLen("JobName", 1)) } + if s.JobOutputDataConfig == nil { + invalidParams.Add(request.NewErrParamRequired("JobOutputDataConfig")) + } + if s.JobOutputDataConfig != nil { + if err := s.JobOutputDataConfig.Validate(); err != nil { + invalidParams.AddNested("JobOutputDataConfig", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -2295,6 +3442,12 @@ func (s *StartFHIRImportJobInput) SetJobName(v string) *StartFHIRImportJobInput return s } +// SetJobOutputDataConfig sets the JobOutputDataConfig field's value. +func (s *StartFHIRImportJobInput) SetJobOutputDataConfig(v *OutputDataConfig) *StartFHIRImportJobInput { + s.JobOutputDataConfig = v + return s +} + type StartFHIRImportJobOutput struct { _ struct{} `type:"structure"` @@ -2340,6 +3493,143 @@ func (s *StartFHIRImportJobOutput) SetJobStatus(v string) *StartFHIRImportJobOut return s } +// A tag is a label consisting of a user-defined key and value. The form for +// tags is {"Key", "Value"} +type Tag struct { + _ struct{} `type:"structure"` + + // The key portion of a tag. Tag keys are case sensitive. + // + // Key is a required field + Key *string `min:"1" type:"string" required:"true"` + + // The value portion of tag. Tag values are case sensitive. + // + // Value is a required field + Value *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tag) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *Tag) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tag"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. +func (s *Tag) SetKey(v string) *Tag { + s.Key = &v + return s +} + +// SetValue sets the Value field's value. +func (s *Tag) SetValue(v string) *Tag { + s.Value = &v + return s +} + +type TagResourceInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name(ARN)that gives Amazon HealthLake access to the Data + // Store which tags are being added to. + // + // ResourceARN is a required field + ResourceARN *string `min:"1" type:"string" required:"true"` + + // The user specified key and value pair tags being added to a Data Store. + // + // Tags is a required field + Tags []*Tag `type:"list" required:"true"` +} + +// String returns the string representation +func (s TagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *TagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TagResourceInput"} + if s.ResourceARN == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceARN")) + } + if s.ResourceARN != nil && len(*s.ResourceARN) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceARN", 1)) + } + if s.Tags == nil { + invalidParams.Add(request.NewErrParamRequired("Tags")) + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceARN sets the ResourceARN field's value. +func (s *TagResourceInput) SetResourceARN(v string) *TagResourceInput { + s.ResourceARN = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *TagResourceInput) SetTags(v []*Tag) *TagResourceInput { + s.Tags = v + return s +} + +type TagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s TagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagResourceOutput) GoString() string { + return s.String() +} + // The user has exceeded their maximum number of allowed calls to the given // API. type ThrottlingException struct { @@ -2397,6 +3687,76 @@ func (s *ThrottlingException) RequestID() string { return s.RespMetadata.RequestID } +type UntagResourceInput struct { + _ struct{} `type:"structure"` + + // "The Amazon Resource Name(ARN) of the Data Store for which tags are being + // removed + // + // ResourceARN is a required field + ResourceARN *string `min:"1" type:"string" required:"true"` + + // The keys for the tags to be removed from the Healthlake Data Store. 
+ // + // TagKeys is a required field + TagKeys []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s UntagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UntagResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UntagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UntagResourceInput"} + if s.ResourceARN == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceARN")) + } + if s.ResourceARN != nil && len(*s.ResourceARN) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceARN", 1)) + } + if s.TagKeys == nil { + invalidParams.Add(request.NewErrParamRequired("TagKeys")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceARN sets the ResourceARN field's value. +func (s *UntagResourceInput) SetResourceARN(v string) *UntagResourceInput { + s.ResourceARN = &v + return s +} + +// SetTagKeys sets the TagKeys field's value. +func (s *UntagResourceInput) SetTagKeys(v []*string) *UntagResourceInput { + s.TagKeys = v + return s +} + +type UntagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UntagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UntagResourceOutput) GoString() string { + return s.String() +} + // The user input parameter was invalid. 
type ValidationException struct { _ struct{} `type:"structure"` @@ -2453,6 +3813,22 @@ func (s *ValidationException) RequestID() string { return s.RespMetadata.RequestID } +const ( + // CmkTypeCustomerManagedKmsKey is a CmkType enum value + CmkTypeCustomerManagedKmsKey = "CUSTOMER_MANAGED_KMS_KEY" + + // CmkTypeAwsOwnedKmsKey is a CmkType enum value + CmkTypeAwsOwnedKmsKey = "AWS_OWNED_KMS_KEY" +) + +// CmkType_Values returns all elements of the CmkType enum +func CmkType_Values() []string { + return []string{ + CmkTypeCustomerManagedKmsKey, + CmkTypeAwsOwnedKmsKey, + } +} + const ( // DatastoreStatusCreating is a DatastoreStatus enum value DatastoreStatusCreating = "CREATING" @@ -2496,6 +3872,9 @@ const ( // JobStatusInProgress is a JobStatus enum value JobStatusInProgress = "IN_PROGRESS" + // JobStatusCompletedWithErrors is a JobStatus enum value + JobStatusCompletedWithErrors = "COMPLETED_WITH_ERRORS" + // JobStatusCompleted is a JobStatus enum value JobStatusCompleted = "COMPLETED" @@ -2508,6 +3887,7 @@ func JobStatus_Values() []string { return []string{ JobStatusSubmitted, JobStatusInProgress, + JobStatusCompletedWithErrors, JobStatusCompleted, JobStatusFailed, } diff --git a/service/healthlake/healthlakeiface/interface.go b/service/healthlake/healthlakeiface/interface.go index 4ccedd36581..b7ad5d74387 100644 --- a/service/healthlake/healthlakeiface/interface.go +++ b/service/healthlake/healthlakeiface/interface.go @@ -87,6 +87,24 @@ type HealthLakeAPI interface { ListFHIRDatastoresPages(*healthlake.ListFHIRDatastoresInput, func(*healthlake.ListFHIRDatastoresOutput, bool) bool) error ListFHIRDatastoresPagesWithContext(aws.Context, *healthlake.ListFHIRDatastoresInput, func(*healthlake.ListFHIRDatastoresOutput, bool) bool, ...request.Option) error + ListFHIRExportJobs(*healthlake.ListFHIRExportJobsInput) (*healthlake.ListFHIRExportJobsOutput, error) + ListFHIRExportJobsWithContext(aws.Context, *healthlake.ListFHIRExportJobsInput, ...request.Option) 
(*healthlake.ListFHIRExportJobsOutput, error) + ListFHIRExportJobsRequest(*healthlake.ListFHIRExportJobsInput) (*request.Request, *healthlake.ListFHIRExportJobsOutput) + + ListFHIRExportJobsPages(*healthlake.ListFHIRExportJobsInput, func(*healthlake.ListFHIRExportJobsOutput, bool) bool) error + ListFHIRExportJobsPagesWithContext(aws.Context, *healthlake.ListFHIRExportJobsInput, func(*healthlake.ListFHIRExportJobsOutput, bool) bool, ...request.Option) error + + ListFHIRImportJobs(*healthlake.ListFHIRImportJobsInput) (*healthlake.ListFHIRImportJobsOutput, error) + ListFHIRImportJobsWithContext(aws.Context, *healthlake.ListFHIRImportJobsInput, ...request.Option) (*healthlake.ListFHIRImportJobsOutput, error) + ListFHIRImportJobsRequest(*healthlake.ListFHIRImportJobsInput) (*request.Request, *healthlake.ListFHIRImportJobsOutput) + + ListFHIRImportJobsPages(*healthlake.ListFHIRImportJobsInput, func(*healthlake.ListFHIRImportJobsOutput, bool) bool) error + ListFHIRImportJobsPagesWithContext(aws.Context, *healthlake.ListFHIRImportJobsInput, func(*healthlake.ListFHIRImportJobsOutput, bool) bool, ...request.Option) error + + ListTagsForResource(*healthlake.ListTagsForResourceInput) (*healthlake.ListTagsForResourceOutput, error) + ListTagsForResourceWithContext(aws.Context, *healthlake.ListTagsForResourceInput, ...request.Option) (*healthlake.ListTagsForResourceOutput, error) + ListTagsForResourceRequest(*healthlake.ListTagsForResourceInput) (*request.Request, *healthlake.ListTagsForResourceOutput) + StartFHIRExportJob(*healthlake.StartFHIRExportJobInput) (*healthlake.StartFHIRExportJobOutput, error) StartFHIRExportJobWithContext(aws.Context, *healthlake.StartFHIRExportJobInput, ...request.Option) (*healthlake.StartFHIRExportJobOutput, error) StartFHIRExportJobRequest(*healthlake.StartFHIRExportJobInput) (*request.Request, *healthlake.StartFHIRExportJobOutput) @@ -94,6 +112,14 @@ type HealthLakeAPI interface { StartFHIRImportJob(*healthlake.StartFHIRImportJobInput) 
(*healthlake.StartFHIRImportJobOutput, error) StartFHIRImportJobWithContext(aws.Context, *healthlake.StartFHIRImportJobInput, ...request.Option) (*healthlake.StartFHIRImportJobOutput, error) StartFHIRImportJobRequest(*healthlake.StartFHIRImportJobInput) (*request.Request, *healthlake.StartFHIRImportJobOutput) + + TagResource(*healthlake.TagResourceInput) (*healthlake.TagResourceOutput, error) + TagResourceWithContext(aws.Context, *healthlake.TagResourceInput, ...request.Option) (*healthlake.TagResourceOutput, error) + TagResourceRequest(*healthlake.TagResourceInput) (*request.Request, *healthlake.TagResourceOutput) + + UntagResource(*healthlake.UntagResourceInput) (*healthlake.UntagResourceOutput, error) + UntagResourceWithContext(aws.Context, *healthlake.UntagResourceInput, ...request.Option) (*healthlake.UntagResourceOutput, error) + UntagResourceRequest(*healthlake.UntagResourceInput) (*request.Request, *healthlake.UntagResourceOutput) } var _ HealthLakeAPI = (*healthlake.HealthLake)(nil) diff --git a/service/lightsail/api.go b/service/lightsail/api.go index 72ebd377570..b7be82604f5 100644 --- a/service/lightsail/api.go +++ b/service/lightsail/api.go @@ -278,7 +278,7 @@ func (c *Lightsail) AttachDiskRequest(input *AttachDiskInput) (req *request.Requ // // The attach disk operation supports tag-based access control via resource // tags applied to the resource identified by disk name. For more information, -// see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). +// see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags). // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -389,7 +389,7 @@ func (c *Lightsail) AttachInstancesToLoadBalancerRequest(input *AttachInstancesT // // The attach instances to load balancer operation supports tag-based access // control via resource tags applied to the resource identified by load balancer -// name. For more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). +// name. For more information, see the Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -504,7 +504,7 @@ func (c *Lightsail) AttachLoadBalancerTlsCertificateRequest(input *AttachLoadBal // // The AttachLoadBalancerTlsCertificate operation supports tag-based access // control via resource tags applied to the resource identified by load balancer -// name. For more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). +// name. For more information, see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -716,7 +716,7 @@ func (c *Lightsail) CloseInstancePublicPortsRequest(input *CloseInstancePublicPo // // The CloseInstancePublicPorts action supports tag-based access control via // resource tags applied to the resource identified by instanceName. 
For more -// information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). +// information, see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -890,6 +890,215 @@ func (c *Lightsail) CopySnapshotWithContext(ctx aws.Context, input *CopySnapshot return out, req.Send() } +const opCreateBucket = "CreateBucket" + +// CreateBucketRequest generates a "aws/request.Request" representing the +// client's request for the CreateBucket operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateBucket for more information on using the CreateBucket +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateBucketRequest method. 
+// req, resp := client.CreateBucketRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/CreateBucket +func (c *Lightsail) CreateBucketRequest(input *CreateBucketInput) (req *request.Request, output *CreateBucketOutput) { + op := &request.Operation{ + Name: opCreateBucket, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateBucketInput{} + } + + output = &CreateBucketOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateBucket API operation for Amazon Lightsail. +// +// Creates an Amazon Lightsail bucket. +// +// A bucket is a cloud storage resource available in the Lightsail object storage +// service. Use buckets to store objects such as data and its descriptive metadata. +// For more information about buckets, see Buckets in Amazon Lightsail (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/buckets-in-amazon-lightsail) +// in the Amazon Lightsail Developer Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Lightsail's +// API operation CreateBucket for usage and error information. +// +// Returned Error Types: +// * AccessDeniedException +// Lightsail throws this exception when the user cannot be authenticated or +// uses invalid credentials to access a resource. +// +// * InvalidInputException +// Lightsail throws this exception when user input does not conform to the validation +// rules of an input field. +// +// Domain and distribution APIs are only available in the N. Virginia (us-east-1) +// AWS Region. Please set your AWS Region configuration to us-east-1 to create, +// view, or edit these resources. +// +// * ServiceException +// A general service exception. 
+// +// * UnauthenticatedException +// Lightsail throws this exception when the user has not been authenticated. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/CreateBucket +func (c *Lightsail) CreateBucket(input *CreateBucketInput) (*CreateBucketOutput, error) { + req, out := c.CreateBucketRequest(input) + return out, req.Send() +} + +// CreateBucketWithContext is the same as CreateBucket with the addition of +// the ability to pass a context and additional request options. +// +// See CreateBucket for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Lightsail) CreateBucketWithContext(ctx aws.Context, input *CreateBucketInput, opts ...request.Option) (*CreateBucketOutput, error) { + req, out := c.CreateBucketRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateBucketAccessKey = "CreateBucketAccessKey" + +// CreateBucketAccessKeyRequest generates a "aws/request.Request" representing the +// client's request for the CreateBucketAccessKey operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateBucketAccessKey for more information on using the CreateBucketAccessKey +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateBucketAccessKeyRequest method. 
+// req, resp := client.CreateBucketAccessKeyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/CreateBucketAccessKey +func (c *Lightsail) CreateBucketAccessKeyRequest(input *CreateBucketAccessKeyInput) (req *request.Request, output *CreateBucketAccessKeyOutput) { + op := &request.Operation{ + Name: opCreateBucketAccessKey, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateBucketAccessKeyInput{} + } + + output = &CreateBucketAccessKeyOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateBucketAccessKey API operation for Amazon Lightsail. +// +// Creates a new access key for the specified Amazon Lightsail bucket. Access +// keys consist of an access key ID and corresponding secret access key. +// +// Access keys grant full programmatic access to the specified bucket and its +// objects. You can have a maximum of two access keys per bucket. Use the GetBucketAccessKeys +// action to get a list of current access keys for a specific bucket. For more +// information about access keys, see Creating access keys for a bucket in Amazon +// Lightsail (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-creating-bucket-access-keys) +// in the Amazon Lightsail Developer Guide. +// +// The secretAccessKey value is returned only in response to the CreateBucketAccessKey +// action. You can get a secret access key only when you first create an access +// key; you cannot get the secret access key later. If you lose the secret access +// key, you must create a new access key. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon Lightsail's +// API operation CreateBucketAccessKey for usage and error information. +// +// Returned Error Types: +// * AccessDeniedException +// Lightsail throws this exception when the user cannot be authenticated or +// uses invalid credentials to access a resource. +// +// * NotFoundException +// Lightsail throws this exception when it cannot find a resource. +// +// * InvalidInputException +// Lightsail throws this exception when user input does not conform to the validation +// rules of an input field. +// +// Domain and distribution APIs are only available in the N. Virginia (us-east-1) +// AWS Region. Please set your AWS Region configuration to us-east-1 to create, +// view, or edit these resources. +// +// * ServiceException +// A general service exception. +// +// * UnauthenticatedException +// Lightsail throws this exception when the user has not been authenticated. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/CreateBucketAccessKey +func (c *Lightsail) CreateBucketAccessKey(input *CreateBucketAccessKeyInput) (*CreateBucketAccessKeyOutput, error) { + req, out := c.CreateBucketAccessKeyRequest(input) + return out, req.Send() +} + +// CreateBucketAccessKeyWithContext is the same as CreateBucketAccessKey with the addition of +// the ability to pass a context and additional request options. +// +// See CreateBucketAccessKey for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *Lightsail) CreateBucketAccessKeyWithContext(ctx aws.Context, input *CreateBucketAccessKeyInput, opts ...request.Option) (*CreateBucketAccessKeyOutput, error) { + req, out := c.CreateBucketAccessKeyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opCreateCertificate = "CreateCertificate" // CreateCertificateRequest generates a "aws/request.Request" representing the @@ -1374,7 +1583,7 @@ func (c *Lightsail) CreateContainerServiceDeploymentRequest(input *CreateContain // from a public registry like Docker Hub, or from your local machine. For more // information, see Creating container images for your Amazon Lightsail container // services (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-creating-container-images) -// in the Lightsail Dev Guide. +// in the Amazon Lightsail Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1491,7 +1700,7 @@ func (c *Lightsail) CreateContainerServiceRegistryLoginRequest(input *CreateCont // (lightsailctl) plugin to push container images to your Lightsail container // service. For more information, see Pushing and managing container images // on your Amazon Lightsail container services (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-pushing-container-images) -// in the Lightsail Dev Guide. +// in the Amazon Lightsail Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1592,7 +1801,7 @@ func (c *Lightsail) CreateDiskRequest(input *CreateDiskInput) (req *request.Requ // instance in the same Availability Zone (e.g., us-east-2a). // // The create disk operation supports tag-based access control via request tags. 
-// For more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). +// For more information, see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1702,7 +1911,8 @@ func (c *Lightsail) CreateDiskFromSnapshotRequest(input *CreateDiskFromSnapshotI // // The create disk from snapshot operation supports tag-based access control // via request tags and resource tags applied to the resource identified by -// disk snapshot name. For more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). +// disk snapshot name. For more information, see the Amazon Lightsail Developer +// Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1832,7 +2042,8 @@ func (c *Lightsail) CreateDiskSnapshotRequest(input *CreateDiskSnapshotInput) (r // disk. // // The create disk snapshot operation supports tag-based access control via -// request tags. For more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). +// request tags. For more information, see the Amazon Lightsail Developer Guide +// (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags). // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2044,7 +2255,7 @@ func (c *Lightsail) CreateDomainRequest(input *CreateDomainInput) (req *request. // Creates a domain resource for the specified domain (e.g., example.com). // // The create domain operation supports tag-based access control via request -// tags. For more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). +// tags. For more information, see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2154,7 +2365,7 @@ func (c *Lightsail) CreateDomainEntryRequest(input *CreateDomainEntryInput) (req // // The create domain entry operation supports tag-based access control via resource // tags applied to the resource identified by domain name. For more information, -// see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). +// see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2262,7 +2473,8 @@ func (c *Lightsail) CreateInstanceSnapshotRequest(input *CreateInstanceSnapshotI // can use a snapshot to create a new instance that is based on that snapshot. // // The create instance snapshot operation supports tag-based access control -// via request tags. 
For more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). +// via request tags. For more information, see the Amazon Lightsail Developer +// Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2369,7 +2581,7 @@ func (c *Lightsail) CreateInstancesRequest(input *CreateInstancesInput) (req *re // Creates one or more Amazon Lightsail instances. // // The create instances operation supports tag-based access control via request -// tags. For more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). +// tags. For more information, see the Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2478,8 +2690,8 @@ func (c *Lightsail) CreateInstancesFromSnapshotRequest(input *CreateInstancesFro // // The create instances from snapshot operation supports tag-based access control // via request tags and resource tags applied to the resource identified by -// instance snapshot name. For more information, see the Lightsail Dev Guide -// (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). +// instance snapshot name. For more information, see the Amazon Lightsail Developer +// Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags). // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2586,7 +2798,7 @@ func (c *Lightsail) CreateKeyPairRequest(input *CreateKeyPairInput) (req *reques // Creates an SSH key pair. // // The create key pair operation supports tag-based access control via request -// tags. For more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). +// tags. For more information, see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2692,7 +2904,7 @@ func (c *Lightsail) CreateLoadBalancerRequest(input *CreateLoadBalancerInput) (r // // Creates a Lightsail load balancer. To learn more about deciding whether to // load balance your application, see Configure your Lightsail instances for -// load balancing (https://lightsail.aws.amazon.com/ls/docs/how-to/article/configure-lightsail-instances-for-load-balancing). +// load balancing (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/configure-lightsail-instances-for-load-balancing). // You can create up to 5 load balancers per AWS Region in your account. // // When you create a load balancer, you can specify a unique name and port settings. @@ -2700,7 +2912,8 @@ func (c *Lightsail) CreateLoadBalancerRequest(input *CreateLoadBalancerInput) (r // operation. // // The create load balancer operation supports tag-based access control via -// request tags. For more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). +// request tags. 
For more information, see the Amazon Lightsail Developer Guide +// (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2810,7 +3023,7 @@ func (c *Lightsail) CreateLoadBalancerTlsCertificateRequest(input *CreateLoadBal // // The CreateLoadBalancerTlsCertificate operation supports tag-based access // control via resource tags applied to the resource identified by load balancer -// name. For more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). +// name. For more information, see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2917,7 +3130,8 @@ func (c *Lightsail) CreateRelationalDatabaseRequest(input *CreateRelationalDatab // Creates a new database in Amazon Lightsail. // // The create relational database operation supports tag-based access control -// via request tags. For more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). +// via request tags. For more information, see the Amazon Lightsail Developer +// Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags). // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3030,7 +3244,7 @@ func (c *Lightsail) CreateRelationalDatabaseFromSnapshotRequest(input *CreateRel // The create relational database from snapshot operation supports tag-based // access control via request tags and resource tags applied to the resource // identified by relationalDatabaseSnapshotName. For more information, see the -// Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). +// Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3139,8 +3353,8 @@ func (c *Lightsail) CreateRelationalDatabaseSnapshotRequest(input *CreateRelatio // a database. // // The create relational database snapshot operation supports tag-based access -// control via request tags. For more information, see the Lightsail Dev Guide -// (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). +// control via request tags. For more information, see the Amazon Lightsail +// Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3350,7 +3564,7 @@ func (c *Lightsail) DeleteAutoSnapshotRequest(input *DeleteAutoSnapshotInput) (r // DeleteAutoSnapshot API operation for Amazon Lightsail. // // Deletes an automatic snapshot of an instance or disk. 
For more information, -// see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-configuring-automatic-snapshots). +// see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-configuring-automatic-snapshots). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3406,6 +3620,209 @@ func (c *Lightsail) DeleteAutoSnapshotWithContext(ctx aws.Context, input *Delete return out, req.Send() } +const opDeleteBucket = "DeleteBucket" + +// DeleteBucketRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucket operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBucket for more information on using the DeleteBucket +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteBucketRequest method. 
+// req, resp := client.DeleteBucketRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/DeleteBucket +func (c *Lightsail) DeleteBucketRequest(input *DeleteBucketInput) (req *request.Request, output *DeleteBucketOutput) { + op := &request.Operation{ + Name: opDeleteBucket, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteBucketInput{} + } + + output = &DeleteBucketOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteBucket API operation for Amazon Lightsail. +// +// Deletes a Amazon Lightsail bucket. +// +// When you delete your bucket, the bucket name is released and can be reused +// for a new bucket in your account or another AWS account. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Lightsail's +// API operation DeleteBucket for usage and error information. +// +// Returned Error Types: +// * AccessDeniedException +// Lightsail throws this exception when the user cannot be authenticated or +// uses invalid credentials to access a resource. +// +// * InvalidInputException +// Lightsail throws this exception when user input does not conform to the validation +// rules of an input field. +// +// Domain and distribution APIs are only available in the N. Virginia (us-east-1) +// AWS Region. Please set your AWS Region configuration to us-east-1 to create, +// view, or edit these resources. +// +// * NotFoundException +// Lightsail throws this exception when it cannot find a resource. +// +// * ServiceException +// A general service exception. +// +// * UnauthenticatedException +// Lightsail throws this exception when the user has not been authenticated. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/DeleteBucket +func (c *Lightsail) DeleteBucket(input *DeleteBucketInput) (*DeleteBucketOutput, error) { + req, out := c.DeleteBucketRequest(input) + return out, req.Send() +} + +// DeleteBucketWithContext is the same as DeleteBucket with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucket for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Lightsail) DeleteBucketWithContext(ctx aws.Context, input *DeleteBucketInput, opts ...request.Option) (*DeleteBucketOutput, error) { + req, out := c.DeleteBucketRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketAccessKey = "DeleteBucketAccessKey" + +// DeleteBucketAccessKeyRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketAccessKey operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBucketAccessKey for more information on using the DeleteBucketAccessKey +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteBucketAccessKeyRequest method. 
+// req, resp := client.DeleteBucketAccessKeyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/DeleteBucketAccessKey +func (c *Lightsail) DeleteBucketAccessKeyRequest(input *DeleteBucketAccessKeyInput) (req *request.Request, output *DeleteBucketAccessKeyOutput) { + op := &request.Operation{ + Name: opDeleteBucketAccessKey, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteBucketAccessKeyInput{} + } + + output = &DeleteBucketAccessKeyOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteBucketAccessKey API operation for Amazon Lightsail. +// +// Deletes an access key for the specified Amazon Lightsail bucket. +// +// We recommend that you delete an access key if the secret access key is compromised. +// +// For more information about access keys, see Creating access keys for a bucket +// in Amazon Lightsail (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-creating-bucket-access-keys) +// in the Amazon Lightsail Developer Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Lightsail's +// API operation DeleteBucketAccessKey for usage and error information. +// +// Returned Error Types: +// * AccessDeniedException +// Lightsail throws this exception when the user cannot be authenticated or +// uses invalid credentials to access a resource. +// +// * InvalidInputException +// Lightsail throws this exception when user input does not conform to the validation +// rules of an input field. +// +// Domain and distribution APIs are only available in the N. Virginia (us-east-1) +// AWS Region. 
Please set your AWS Region configuration to us-east-1 to create, +// view, or edit these resources. +// +// * NotFoundException +// Lightsail throws this exception when it cannot find a resource. +// +// * ServiceException +// A general service exception. +// +// * UnauthenticatedException +// Lightsail throws this exception when the user has not been authenticated. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/DeleteBucketAccessKey +func (c *Lightsail) DeleteBucketAccessKey(input *DeleteBucketAccessKeyInput) (*DeleteBucketAccessKeyOutput, error) { + req, out := c.DeleteBucketAccessKeyRequest(input) + return out, req.Send() +} + +// DeleteBucketAccessKeyWithContext is the same as DeleteBucketAccessKey with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketAccessKey for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Lightsail) DeleteBucketAccessKeyWithContext(ctx aws.Context, input *DeleteBucketAccessKeyInput, opts ...request.Option) (*DeleteBucketAccessKeyOutput, error) { + req, out := c.DeleteBucketAccessKeyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDeleteCertificate = "DeleteCertificate" // DeleteCertificateRequest generates a "aws/request.Request" representing the @@ -3862,7 +4279,7 @@ func (c *Lightsail) DeleteDiskRequest(input *DeleteDiskInput) (req *request.Requ // // The delete disk operation supports tag-based access control via resource // tags applied to the resource identified by disk name. 
For more information, -// see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). +// see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3977,7 +4394,7 @@ func (c *Lightsail) DeleteDiskSnapshotRequest(input *DeleteDiskSnapshotInput) (r // // The delete disk snapshot operation supports tag-based access control via // resource tags applied to the resource identified by disk snapshot name. For -// more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). +// more information, see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4185,7 +4602,7 @@ func (c *Lightsail) DeleteDomainRequest(input *DeleteDomainInput) (req *request. // // The delete domain operation supports tag-based access control via resource // tags applied to the resource identified by domain name. For more information, -// see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). +// see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags). // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4293,7 +4710,7 @@ func (c *Lightsail) DeleteDomainEntryRequest(input *DeleteDomainEntryInput) (req // // The delete domain entry operation supports tag-based access control via resource // tags applied to the resource identified by domain name. For more information, -// see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). +// see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4401,7 +4818,7 @@ func (c *Lightsail) DeleteInstanceRequest(input *DeleteInstanceInput) (req *requ // // The delete instance operation supports tag-based access control via resource // tags applied to the resource identified by instance name. For more information, -// see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). +// see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4509,7 +4926,7 @@ func (c *Lightsail) DeleteInstanceSnapshotRequest(input *DeleteInstanceSnapshotI // // The delete instance snapshot operation supports tag-based access control // via resource tags applied to the resource identified by instance snapshot -// name. For more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). +// name. 
For more information, see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4617,7 +5034,7 @@ func (c *Lightsail) DeleteKeyPairRequest(input *DeleteKeyPairInput) (req *reques // // The delete key pair operation supports tag-based access control via resource // tags applied to the resource identified by key pair name. For more information, -// see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). +// see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4729,7 +5146,7 @@ func (c *Lightsail) DeleteKnownHostKeysRequest(input *DeleteKnownHostKeysInput) // Perform this operation only if you were expecting the host key or certificate // mismatch or if you are familiar with the new host key or certificate on the // instance. For more information, see Troubleshooting connection issues when -// using the Amazon Lightsail browser-based SSH or RDP client (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-troubleshooting-browser-based-ssh-rdp-client-connection). +// using the Amazon Lightsail browser-based SSH or RDP client (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-troubleshooting-browser-based-ssh-rdp-client-connection). // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4839,7 +5256,7 @@ func (c *Lightsail) DeleteLoadBalancerRequest(input *DeleteLoadBalancerInput) (r // // The delete load balancer operation supports tag-based access control via // resource tags applied to the resource identified by load balancer name. For -// more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). +// more information, see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4947,7 +5364,7 @@ func (c *Lightsail) DeleteLoadBalancerTlsCertificateRequest(input *DeleteLoadBal // // The DeleteLoadBalancerTlsCertificate operation supports tag-based access // control via resource tags applied to the resource identified by load balancer -// name. For more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). +// name. For more information, see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5055,7 +5472,7 @@ func (c *Lightsail) DeleteRelationalDatabaseRequest(input *DeleteRelationalDatab // // The delete relational database operation supports tag-based access control // via resource tags applied to the resource identified by relationalDatabaseName. 
-// For more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). +// For more information, see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5163,7 +5580,7 @@ func (c *Lightsail) DeleteRelationalDatabaseSnapshotRequest(input *DeleteRelatio // // The delete relational database snapshot operation supports tag-based access // control via resource tags applied to the resource identified by relationalDatabaseName. -// For more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). +// For more information, see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5377,7 +5794,7 @@ func (c *Lightsail) DetachDiskRequest(input *DetachDiskInput) (req *request.Requ // // The detach disk operation supports tag-based access control via resource // tags applied to the resource identified by disk name. For more information, -// see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). +// see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags). // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5488,7 +5905,7 @@ func (c *Lightsail) DetachInstancesFromLoadBalancerRequest(input *DetachInstance // // The detach instances from load balancer operation supports tag-based access // control via resource tags applied to the resource identified by load balancer -// name. For more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). +// name. For more information, see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5697,7 +6114,7 @@ func (c *Lightsail) DisableAddOnRequest(input *DisableAddOnInput) (req *request. // DisableAddOn API operation for Amazon Lightsail. // // Disables an add-on for an Amazon Lightsail resource. For more information, -// see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-configuring-automatic-snapshots). +// see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-configuring-automatic-snapshots). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5902,7 +6319,7 @@ func (c *Lightsail) EnableAddOnRequest(input *EnableAddOnInput) (req *request.Re // EnableAddOn API operation for Amazon Lightsail. // // Enables or modifies an add-on for an Amazon Lightsail resource. For more -// information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-configuring-automatic-snapshots). 
+// information, see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-configuring-automatic-snapshots). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -6015,7 +6432,7 @@ func (c *Lightsail) ExportSnapshotRequest(input *ExportSnapshotInput) (req *requ // // The export snapshot operation supports tag-based access control via resource // tags applied to the resource identified by source snapshot name. For more -// information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). +// information, see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags). // // Use the get instance snapshots or get disk snapshots operations to get a // list of snapshots that you can export to Amazon EC2. @@ -6334,7 +6751,7 @@ func (c *Lightsail) GetAutoSnapshotsRequest(input *GetAutoSnapshotsInput) (req * // GetAutoSnapshots API operation for Amazon Lightsail. // // Returns the available automatic snapshots for an instance or disk. For more -// information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-configuring-automatic-snapshots). +// information, see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-configuring-automatic-snapshots). // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -6502,6 +6919,408 @@ func (c *Lightsail) GetBlueprintsWithContext(ctx aws.Context, input *GetBlueprin return out, req.Send() } +const opGetBucketAccessKeys = "GetBucketAccessKeys" + +// GetBucketAccessKeysRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketAccessKeys operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketAccessKeys for more information on using the GetBucketAccessKeys +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketAccessKeysRequest method. +// req, resp := client.GetBucketAccessKeysRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetBucketAccessKeys +func (c *Lightsail) GetBucketAccessKeysRequest(input *GetBucketAccessKeysInput) (req *request.Request, output *GetBucketAccessKeysOutput) { + op := &request.Operation{ + Name: opGetBucketAccessKeys, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetBucketAccessKeysInput{} + } + + output = &GetBucketAccessKeysOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketAccessKeys API operation for Amazon Lightsail. +// +// Returns the existing access key IDs for the specified Amazon Lightsail bucket. +// +// This action does not return the secret access key value of an access key. 
+// You can get a secret access key only when you create it from the response +// of the CreateBucketAccessKey action. If you lose the secret access key, you +// must create a new access key. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Lightsail's +// API operation GetBucketAccessKeys for usage and error information. +// +// Returned Error Types: +// * AccessDeniedException +// Lightsail throws this exception when the user cannot be authenticated or +// uses invalid credentials to access a resource. +// +// * InvalidInputException +// Lightsail throws this exception when user input does not conform to the validation +// rules of an input field. +// +// Domain and distribution APIs are only available in the N. Virginia (us-east-1) +// AWS Region. Please set your AWS Region configuration to us-east-1 to create, +// view, or edit these resources. +// +// * NotFoundException +// Lightsail throws this exception when it cannot find a resource. +// +// * ServiceException +// A general service exception. +// +// * UnauthenticatedException +// Lightsail throws this exception when the user has not been authenticated. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetBucketAccessKeys +func (c *Lightsail) GetBucketAccessKeys(input *GetBucketAccessKeysInput) (*GetBucketAccessKeysOutput, error) { + req, out := c.GetBucketAccessKeysRequest(input) + return out, req.Send() +} + +// GetBucketAccessKeysWithContext is the same as GetBucketAccessKeys with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketAccessKeys for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Lightsail) GetBucketAccessKeysWithContext(ctx aws.Context, input *GetBucketAccessKeysInput, opts ...request.Option) (*GetBucketAccessKeysOutput, error) { + req, out := c.GetBucketAccessKeysRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketBundles = "GetBucketBundles" + +// GetBucketBundlesRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketBundles operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketBundles for more information on using the GetBucketBundles +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketBundlesRequest method. +// req, resp := client.GetBucketBundlesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetBucketBundles +func (c *Lightsail) GetBucketBundlesRequest(input *GetBucketBundlesInput) (req *request.Request, output *GetBucketBundlesOutput) { + op := &request.Operation{ + Name: opGetBucketBundles, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetBucketBundlesInput{} + } + + output = &GetBucketBundlesOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketBundles API operation for Amazon Lightsail. 
+// +// Returns the bundles that you can apply to a Amazon Lightsail bucket. +// +// The bucket bundle specifies the monthly cost, storage quota, and data transfer +// quota for a bucket. +// +// Use the UpdateBucketBundle action to update the bundle for a bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Lightsail's +// API operation GetBucketBundles for usage and error information. +// +// Returned Error Types: +// * AccessDeniedException +// Lightsail throws this exception when the user cannot be authenticated or +// uses invalid credentials to access a resource. +// +// * InvalidInputException +// Lightsail throws this exception when user input does not conform to the validation +// rules of an input field. +// +// Domain and distribution APIs are only available in the N. Virginia (us-east-1) +// AWS Region. Please set your AWS Region configuration to us-east-1 to create, +// view, or edit these resources. +// +// * ServiceException +// A general service exception. +// +// * UnauthenticatedException +// Lightsail throws this exception when the user has not been authenticated. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetBucketBundles +func (c *Lightsail) GetBucketBundles(input *GetBucketBundlesInput) (*GetBucketBundlesOutput, error) { + req, out := c.GetBucketBundlesRequest(input) + return out, req.Send() +} + +// GetBucketBundlesWithContext is the same as GetBucketBundles with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketBundles for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Lightsail) GetBucketBundlesWithContext(ctx aws.Context, input *GetBucketBundlesInput, opts ...request.Option) (*GetBucketBundlesOutput, error) { + req, out := c.GetBucketBundlesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketMetricData = "GetBucketMetricData" + +// GetBucketMetricDataRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketMetricData operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketMetricData for more information on using the GetBucketMetricData +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketMetricDataRequest method. +// req, resp := client.GetBucketMetricDataRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetBucketMetricData +func (c *Lightsail) GetBucketMetricDataRequest(input *GetBucketMetricDataInput) (req *request.Request, output *GetBucketMetricDataOutput) { + op := &request.Operation{ + Name: opGetBucketMetricData, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetBucketMetricDataInput{} + } + + output = &GetBucketMetricDataOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketMetricData API operation for Amazon Lightsail. 
+// +// Returns the data points of a specific metric for an Amazon Lightsail bucket. +// +// Metrics report the utilization of a bucket. View and collect metric data +// regularly to monitor the number of objects stored in a bucket (including +// object versions) and the storage space used by those objects. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Lightsail's +// API operation GetBucketMetricData for usage and error information. +// +// Returned Error Types: +// * AccessDeniedException +// Lightsail throws this exception when the user cannot be authenticated or +// uses invalid credentials to access a resource. +// +// * InvalidInputException +// Lightsail throws this exception when user input does not conform to the validation +// rules of an input field. +// +// Domain and distribution APIs are only available in the N. Virginia (us-east-1) +// AWS Region. Please set your AWS Region configuration to us-east-1 to create, +// view, or edit these resources. +// +// * NotFoundException +// Lightsail throws this exception when it cannot find a resource. +// +// * ServiceException +// A general service exception. +// +// * UnauthenticatedException +// Lightsail throws this exception when the user has not been authenticated. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetBucketMetricData +func (c *Lightsail) GetBucketMetricData(input *GetBucketMetricDataInput) (*GetBucketMetricDataOutput, error) { + req, out := c.GetBucketMetricDataRequest(input) + return out, req.Send() +} + +// GetBucketMetricDataWithContext is the same as GetBucketMetricData with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketMetricData for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Lightsail) GetBucketMetricDataWithContext(ctx aws.Context, input *GetBucketMetricDataInput, opts ...request.Option) (*GetBucketMetricDataOutput, error) { + req, out := c.GetBucketMetricDataRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBuckets = "GetBuckets" + +// GetBucketsRequest generates a "aws/request.Request" representing the +// client's request for the GetBuckets operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBuckets for more information on using the GetBuckets +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketsRequest method. +// req, resp := client.GetBucketsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetBuckets +func (c *Lightsail) GetBucketsRequest(input *GetBucketsInput) (req *request.Request, output *GetBucketsOutput) { + op := &request.Operation{ + Name: opGetBuckets, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetBucketsInput{} + } + + output = &GetBucketsOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBuckets API operation for Amazon Lightsail. 
+// +// Returns information about one or more Amazon Lightsail buckets. +// +// For more information about buckets, see Buckets in Amazon Lightsail (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/buckets-in-amazon-lightsail) +// in the Amazon Lightsail Developer Guide.. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Lightsail's +// API operation GetBuckets for usage and error information. +// +// Returned Error Types: +// * AccessDeniedException +// Lightsail throws this exception when the user cannot be authenticated or +// uses invalid credentials to access a resource. +// +// * InvalidInputException +// Lightsail throws this exception when user input does not conform to the validation +// rules of an input field. +// +// Domain and distribution APIs are only available in the N. Virginia (us-east-1) +// AWS Region. Please set your AWS Region configuration to us-east-1 to create, +// view, or edit these resources. +// +// * NotFoundException +// Lightsail throws this exception when it cannot find a resource. +// +// * ServiceException +// A general service exception. +// +// * UnauthenticatedException +// Lightsail throws this exception when the user has not been authenticated. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetBuckets +func (c *Lightsail) GetBuckets(input *GetBucketsInput) (*GetBucketsOutput, error) { + req, out := c.GetBucketsRequest(input) + return out, req.Send() +} + +// GetBucketsWithContext is the same as GetBuckets with the addition of +// the ability to pass a context and additional request options. +// +// See GetBuckets for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Lightsail) GetBucketsWithContext(ctx aws.Context, input *GetBucketsInput, opts ...request.Option) (*GetBucketsOutput, error) { + req, out := c.GetBucketsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opGetBundles = "GetBundles" // GetBundlesRequest generates a "aws/request.Request" representing the @@ -8090,7 +8909,7 @@ func (c *Lightsail) GetDistributionBundlesRequest(input *GetDistributionBundlesI // GetDistributionBundles API operation for Amazon Lightsail. // -// Returns the list bundles that can be applied to you Amazon Lightsail content +// Returns the bundles that can be applied to your Amazon Lightsail content // delivery network (CDN) distributions. // // A distribution bundle specifies the monthly network transfer quota and monthly @@ -8709,11 +9528,11 @@ func (c *Lightsail) GetExportSnapshotRecordsRequest(input *GetExportSnapshotReco // GetExportSnapshotRecords API operation for Amazon Lightsail. // -// Returns the export snapshot record created as a result of the export snapshot +// Returns all export snapshot records created as a result of the export snapshot // operation. // // An export snapshot record can be used to create a new Amazon EC2 instance -// and its related resources with the create cloud formation stack operation. +// and its related resources with the CreateCloudFormationStack action. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -8927,7 +9746,7 @@ func (c *Lightsail) GetInstanceAccessDetailsRequest(input *GetInstanceAccessDeta // // The get instance access details operation supports tag-based access control // via resource tags applied to the resource identified by instance name. 
For -// more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). +// more information, see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -12413,7 +13232,7 @@ func (c *Lightsail) OpenInstancePublicPortsRequest(input *OpenInstancePublicPort // // The OpenInstancePublicPorts action supports tag-based access control via // resource tags applied to the resource identified by instanceName. For more -// information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). +// information, see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -12517,7 +13336,7 @@ func (c *Lightsail) PeerVpcRequest(input *PeerVpcInput) (req *request.Request, o // PeerVpc API operation for Amazon Lightsail. // -// Tries to peer the Lightsail VPC with the user's default VPC. +// Peers the Lightsail VPC with the user's default VPC. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -12743,7 +13562,7 @@ func (c *Lightsail) PutInstancePublicPortsRequest(input *PutInstancePublicPortsI // // The PutInstancePublicPorts action supports tag-based access control via resource // tags applied to the resource identified by instanceName. 
For more information, -// see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). +// see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -12851,7 +13670,7 @@ func (c *Lightsail) RebootInstanceRequest(input *RebootInstanceInput) (req *requ // // The reboot instance operation supports tag-based access control via resource // tags applied to the resource identified by instance name. For more information, -// see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). +// see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -12959,7 +13778,7 @@ func (c *Lightsail) RebootRelationalDatabaseRequest(input *RebootRelationalDatab // // The reboot relational database operation supports tag-based access control // via resource tags applied to the resource identified by relationalDatabaseName. -// For more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). +// For more information, see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags). // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -13069,7 +13888,7 @@ func (c *Lightsail) RegisterContainerImageRequest(input *RegisterContainerImageI // (lightsailctl) plugin to push container images to your Lightsail container // service. For more information, see Pushing and managing container images // on your Amazon Lightsail container services (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-pushing-container-images) -// in the Lightsail Dev Guide. +// in the Amazon Lightsail Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -13552,6 +14371,107 @@ func (c *Lightsail) SetIpAddressTypeWithContext(ctx aws.Context, input *SetIpAdd return out, req.Send() } +const opSetResourceAccessForBucket = "SetResourceAccessForBucket" + +// SetResourceAccessForBucketRequest generates a "aws/request.Request" representing the +// client's request for the SetResourceAccessForBucket operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See SetResourceAccessForBucket for more information on using the SetResourceAccessForBucket +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the SetResourceAccessForBucketRequest method. 
+// req, resp := client.SetResourceAccessForBucketRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/SetResourceAccessForBucket +func (c *Lightsail) SetResourceAccessForBucketRequest(input *SetResourceAccessForBucketInput) (req *request.Request, output *SetResourceAccessForBucketOutput) { + op := &request.Operation{ + Name: opSetResourceAccessForBucket, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetResourceAccessForBucketInput{} + } + + output = &SetResourceAccessForBucketOutput{} + req = c.newRequest(op, input, output) + return +} + +// SetResourceAccessForBucket API operation for Amazon Lightsail. +// +// Sets the Amazon Lightsail resources that can access the specified Lightsail +// bucket. +// +// Lightsail buckets currently support setting access for Lightsail instances +// in the same AWS Region. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Lightsail's +// API operation SetResourceAccessForBucket for usage and error information. +// +// Returned Error Types: +// * AccessDeniedException +// Lightsail throws this exception when the user cannot be authenticated or +// uses invalid credentials to access a resource. +// +// * InvalidInputException +// Lightsail throws this exception when user input does not conform to the validation +// rules of an input field. +// +// Domain and distribution APIs are only available in the N. Virginia (us-east-1) +// AWS Region. Please set your AWS Region configuration to us-east-1 to create, +// view, or edit these resources. +// +// * NotFoundException +// Lightsail throws this exception when it cannot find a resource. 
+// +// * ServiceException +// A general service exception. +// +// * UnauthenticatedException +// Lightsail throws this exception when the user has not been authenticated. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/SetResourceAccessForBucket +func (c *Lightsail) SetResourceAccessForBucket(input *SetResourceAccessForBucketInput) (*SetResourceAccessForBucketOutput, error) { + req, out := c.SetResourceAccessForBucketRequest(input) + return out, req.Send() +} + +// SetResourceAccessForBucketWithContext is the same as SetResourceAccessForBucket with the addition of +// the ability to pass a context and additional request options. +// +// See SetResourceAccessForBucket for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Lightsail) SetResourceAccessForBucketWithContext(ctx aws.Context, input *SetResourceAccessForBucketInput, opts ...request.Option) (*SetResourceAccessForBucketOutput, error) { + req, out := c.SetResourceAccessForBucketRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opStartInstance = "StartInstance" // StartInstanceRequest generates a "aws/request.Request" representing the @@ -13602,11 +14522,11 @@ func (c *Lightsail) StartInstanceRequest(input *StartInstanceInput) (req *reques // When you start a stopped instance, Lightsail assigns a new public IP address // to the instance. To use the same IP address after stopping and starting an // instance, create a static IP address and attach it to the instance. For more -// information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/lightsail-create-static-ip). 
+// information, see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/lightsail-create-static-ip). // // The start instance operation supports tag-based access control via resource // tags applied to the resource identified by instance name. For more information, -// see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). +// see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -13715,7 +14635,7 @@ func (c *Lightsail) StartRelationalDatabaseRequest(input *StartRelationalDatabas // // The start relational database operation supports tag-based access control // via resource tags applied to the resource identified by relationalDatabaseName. -// For more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). +// For more information, see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -13824,11 +14744,11 @@ func (c *Lightsail) StopInstanceRequest(input *StopInstanceInput) (req *request. // When you start a stopped instance, Lightsail assigns a new public IP address // to the instance. To use the same IP address after stopping and starting an // instance, create a static IP address and attach it to the instance. 
For more -// information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/lightsail-create-static-ip). +// information, see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/lightsail-create-static-ip). // // The stop instance operation supports tag-based access control via resource // tags applied to the resource identified by instance name. For more information, -// see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). +// see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -13936,7 +14856,7 @@ func (c *Lightsail) StopRelationalDatabaseRequest(input *StopRelationalDatabaseI // // The stop relational database operation supports tag-based access control // via resource tags applied to the resource identified by relationalDatabaseName. -// For more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). +// For more information, see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -14043,11 +14963,11 @@ func (c *Lightsail) TagResourceRequest(input *TagResourceInput) (req *request.Re // Adds one or more tags to the specified Amazon Lightsail resource. Each resource // can have a maximum of 50 tags. Each tag consists of a key and an optional // value. Tag keys must be unique per resource. 
For more information about tags, -// see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-tags). +// see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-tags). // // The tag resource operation supports tag-based access control via request // tags and resource tags applied to the resource identified by resource name. -// For more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). +// For more information, see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -14259,7 +15179,7 @@ func (c *Lightsail) UnpeerVpcRequest(input *UnpeerVpcInput) (req *request.Reques // UnpeerVpc API operation for Amazon Lightsail. // -// Attempts to unpeer the Lightsail VPC from the user's default VPC. +// Unpeers the Lightsail VPC from the user's default VPC. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -14368,7 +15288,7 @@ func (c *Lightsail) UntagResourceRequest(input *UntagResourceInput) (req *reques // // The untag resource operation supports tag-based access control via request // tags and resource tags applied to the resource identified by resource name. -// For more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). +// For more information, see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags). 
// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -14428,63 +15348,67 @@ func (c *Lightsail) UntagResourceWithContext(ctx aws.Context, input *UntagResour return out, req.Send() } -const opUpdateContainerService = "UpdateContainerService" +const opUpdateBucket = "UpdateBucket" -// UpdateContainerServiceRequest generates a "aws/request.Request" representing the -// client's request for the UpdateContainerService operation. The "output" return +// UpdateBucketRequest generates a "aws/request.Request" representing the +// client's request for the UpdateBucket operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UpdateContainerService for more information on using the UpdateContainerService +// See UpdateBucket for more information on using the UpdateBucket // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UpdateContainerServiceRequest method. -// req, resp := client.UpdateContainerServiceRequest(params) +// // Example sending a request using the UpdateBucketRequest method. 
+// req, resp := client.UpdateBucketRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/UpdateContainerService -func (c *Lightsail) UpdateContainerServiceRequest(input *UpdateContainerServiceInput) (req *request.Request, output *UpdateContainerServiceOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/UpdateBucket +func (c *Lightsail) UpdateBucketRequest(input *UpdateBucketInput) (req *request.Request, output *UpdateBucketOutput) { op := &request.Operation{ - Name: opUpdateContainerService, + Name: opUpdateBucket, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &UpdateContainerServiceInput{} + input = &UpdateBucketInput{} } - output = &UpdateContainerServiceOutput{} + output = &UpdateBucketOutput{} req = c.newRequest(op, input, output) return } -// UpdateContainerService API operation for Amazon Lightsail. +// UpdateBucket API operation for Amazon Lightsail. // -// Updates the configuration of your Amazon Lightsail container service, such -// as its power, scale, and public domain names. +// Updates an existing Amazon Lightsail bucket. +// +// Use this action to update the configuration of an existing bucket, such as +// versioning, public accessibility, and the AWS accounts that can access the +// bucket. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Lightsail's -// API operation UpdateContainerService for usage and error information. +// API operation UpdateBucket for usage and error information. // // Returned Error Types: -// * ServiceException -// A general service exception. 
+// * AccessDeniedException +// Lightsail throws this exception when the user cannot be authenticated or +// uses invalid credentials to access a resource. // // * InvalidInputException // Lightsail throws this exception when user input does not conform to the validation @@ -14497,89 +15421,199 @@ func (c *Lightsail) UpdateContainerServiceRequest(input *UpdateContainerServiceI // * NotFoundException // Lightsail throws this exception when it cannot find a resource. // +// * ServiceException +// A general service exception. +// +// * UnauthenticatedException +// Lightsail throws this exception when the user has not been authenticated. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/UpdateBucket +func (c *Lightsail) UpdateBucket(input *UpdateBucketInput) (*UpdateBucketOutput, error) { + req, out := c.UpdateBucketRequest(input) + return out, req.Send() +} + +// UpdateBucketWithContext is the same as UpdateBucket with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateBucket for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Lightsail) UpdateBucketWithContext(ctx aws.Context, input *UpdateBucketInput, opts ...request.Option) (*UpdateBucketOutput, error) { + req, out := c.UpdateBucketRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateBucketBundle = "UpdateBucketBundle" + +// UpdateBucketBundleRequest generates a "aws/request.Request" representing the +// client's request for the UpdateBucketBundle operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateBucketBundle for more information on using the UpdateBucketBundle +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateBucketBundleRequest method. +// req, resp := client.UpdateBucketBundleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/UpdateBucketBundle +func (c *Lightsail) UpdateBucketBundleRequest(input *UpdateBucketBundleInput) (req *request.Request, output *UpdateBucketBundleOutput) { + op := &request.Operation{ + Name: opUpdateBucketBundle, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateBucketBundleInput{} + } + + output = &UpdateBucketBundleOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateBucketBundle API operation for Amazon Lightsail. +// +// Updates the bundle, or storage plan, of an existing Amazon Lightsail bucket. +// +// A bucket bundle specifies the monthly cost, storage space, and data transfer +// quota for a bucket. You can update a bucket's bundle only one time within +// a monthly AWS billing cycle. To determine if you can update a bucket's bundle, +// use the GetBuckets action. The ableToUpdateBundle parameter in the response +// will indicate whether you can currently update a bucket's bundle. +// +// Update a bucket's bundle if it's consistently going over its storage space +// or data transfer quota, or if a bucket's usage is consistently in the lower +// range of its storage space or data transfer quota. 
Due to the unpredictable +// usage fluctuations that a bucket might experience, we strongly recommend +// that you update a bucket's bundle only as a long-term strategy, instead of +// as a short-term, monthly cost-cutting measure. Choose a bucket bundle that +// will provide the bucket with ample storage space and data transfer for a +// long time to come. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Lightsail's +// API operation UpdateBucketBundle for usage and error information. +// +// Returned Error Types: // * AccessDeniedException // Lightsail throws this exception when the user cannot be authenticated or // uses invalid credentials to access a resource. // +// * InvalidInputException +// Lightsail throws this exception when user input does not conform to the validation +// rules of an input field. +// +// Domain and distribution APIs are only available in the N. Virginia (us-east-1) +// AWS Region. Please set your AWS Region configuration to us-east-1 to create, +// view, or edit these resources. +// +// * NotFoundException +// Lightsail throws this exception when it cannot find a resource. +// +// * ServiceException +// A general service exception. +// // * UnauthenticatedException // Lightsail throws this exception when the user has not been authenticated. 
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/UpdateContainerService -func (c *Lightsail) UpdateContainerService(input *UpdateContainerServiceInput) (*UpdateContainerServiceOutput, error) { - req, out := c.UpdateContainerServiceRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/UpdateBucketBundle +func (c *Lightsail) UpdateBucketBundle(input *UpdateBucketBundleInput) (*UpdateBucketBundleOutput, error) { + req, out := c.UpdateBucketBundleRequest(input) return out, req.Send() } -// UpdateContainerServiceWithContext is the same as UpdateContainerService with the addition of +// UpdateBucketBundleWithContext is the same as UpdateBucketBundle with the addition of // the ability to pass a context and additional request options. // -// See UpdateContainerService for details on how to use this API operation. +// See UpdateBucketBundle for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Lightsail) UpdateContainerServiceWithContext(ctx aws.Context, input *UpdateContainerServiceInput, opts ...request.Option) (*UpdateContainerServiceOutput, error) { - req, out := c.UpdateContainerServiceRequest(input) +func (c *Lightsail) UpdateBucketBundleWithContext(ctx aws.Context, input *UpdateBucketBundleInput, opts ...request.Option) (*UpdateBucketBundleOutput, error) { + req, out := c.UpdateBucketBundleRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUpdateDistribution = "UpdateDistribution" +const opUpdateContainerService = "UpdateContainerService" -// UpdateDistributionRequest generates a "aws/request.Request" representing the -// client's request for the UpdateDistribution operation. 
The "output" return +// UpdateContainerServiceRequest generates a "aws/request.Request" representing the +// client's request for the UpdateContainerService operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UpdateDistribution for more information on using the UpdateDistribution +// See UpdateContainerService for more information on using the UpdateContainerService // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UpdateDistributionRequest method. -// req, resp := client.UpdateDistributionRequest(params) +// // Example sending a request using the UpdateContainerServiceRequest method. 
+// req, resp := client.UpdateContainerServiceRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/UpdateDistribution -func (c *Lightsail) UpdateDistributionRequest(input *UpdateDistributionInput) (req *request.Request, output *UpdateDistributionOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/UpdateContainerService +func (c *Lightsail) UpdateContainerServiceRequest(input *UpdateContainerServiceInput) (req *request.Request, output *UpdateContainerServiceOutput) { op := &request.Operation{ - Name: opUpdateDistribution, + Name: opUpdateContainerService, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &UpdateDistributionInput{} + input = &UpdateContainerServiceInput{} } - output = &UpdateDistributionOutput{} + output = &UpdateContainerServiceOutput{} req = c.newRequest(op, input, output) return } -// UpdateDistribution API operation for Amazon Lightsail. -// -// Updates an existing Amazon Lightsail content delivery network (CDN) distribution. +// UpdateContainerService API operation for Amazon Lightsail. // -// Use this action to update the configuration of your existing distribution +// Updates the configuration of your Amazon Lightsail container service, such +// as its power, scale, and public domain names. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Lightsail's -// API operation UpdateDistribution for usage and error information. +// API operation UpdateContainerService for usage and error information. 
// // Returned Error Types: // * ServiceException @@ -14596,9 +15630,6 @@ func (c *Lightsail) UpdateDistributionRequest(input *UpdateDistributionInput) (r // * NotFoundException // Lightsail throws this exception when it cannot find a resource. // -// * OperationFailureException -// Lightsail throws this exception when an operation fails to execute. -// // * AccessDeniedException // Lightsail throws this exception when the user cannot be authenticated or // uses invalid credentials to access a resource. @@ -14606,93 +15637,195 @@ func (c *Lightsail) UpdateDistributionRequest(input *UpdateDistributionInput) (r // * UnauthenticatedException // Lightsail throws this exception when the user has not been authenticated. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/UpdateDistribution -func (c *Lightsail) UpdateDistribution(input *UpdateDistributionInput) (*UpdateDistributionOutput, error) { - req, out := c.UpdateDistributionRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/UpdateContainerService +func (c *Lightsail) UpdateContainerService(input *UpdateContainerServiceInput) (*UpdateContainerServiceOutput, error) { + req, out := c.UpdateContainerServiceRequest(input) return out, req.Send() } -// UpdateDistributionWithContext is the same as UpdateDistribution with the addition of +// UpdateContainerServiceWithContext is the same as UpdateContainerService with the addition of // the ability to pass a context and additional request options. // -// See UpdateDistribution for details on how to use this API operation. +// See UpdateContainerService for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
-func (c *Lightsail) UpdateDistributionWithContext(ctx aws.Context, input *UpdateDistributionInput, opts ...request.Option) (*UpdateDistributionOutput, error) { - req, out := c.UpdateDistributionRequest(input) +func (c *Lightsail) UpdateContainerServiceWithContext(ctx aws.Context, input *UpdateContainerServiceInput, opts ...request.Option) (*UpdateContainerServiceOutput, error) { + req, out := c.UpdateContainerServiceRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUpdateDistributionBundle = "UpdateDistributionBundle" +const opUpdateDistribution = "UpdateDistribution" -// UpdateDistributionBundleRequest generates a "aws/request.Request" representing the -// client's request for the UpdateDistributionBundle operation. The "output" return +// UpdateDistributionRequest generates a "aws/request.Request" representing the +// client's request for the UpdateDistribution operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UpdateDistributionBundle for more information on using the UpdateDistributionBundle +// See UpdateDistribution for more information on using the UpdateDistribution // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UpdateDistributionBundleRequest method. -// req, resp := client.UpdateDistributionBundleRequest(params) +// // Example sending a request using the UpdateDistributionRequest method. 
+// req, resp := client.UpdateDistributionRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/UpdateDistributionBundle -func (c *Lightsail) UpdateDistributionBundleRequest(input *UpdateDistributionBundleInput) (req *request.Request, output *UpdateDistributionBundleOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/UpdateDistribution +func (c *Lightsail) UpdateDistributionRequest(input *UpdateDistributionInput) (req *request.Request, output *UpdateDistributionOutput) { op := &request.Operation{ - Name: opUpdateDistributionBundle, + Name: opUpdateDistribution, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &UpdateDistributionBundleInput{} + input = &UpdateDistributionInput{} } - output = &UpdateDistributionBundleOutput{} + output = &UpdateDistributionOutput{} req = c.newRequest(op, input, output) return } -// UpdateDistributionBundle API operation for Amazon Lightsail. -// -// Updates the bundle of your Amazon Lightsail content delivery network (CDN) -// distribution. -// -// A distribution bundle specifies the monthly network transfer quota and monthly -// cost of your dsitribution. +// UpdateDistribution API operation for Amazon Lightsail. // -// Update your distribution's bundle if your distribution is going over its -// monthly network transfer quota and is incurring an overage fee. +// Updates an existing Amazon Lightsail content delivery network (CDN) distribution. // -// You can update your distribution's bundle only one time within your monthly -// AWS billing cycle. To determine if you can update your distribution's bundle, -// use the GetDistributions action. The ableToUpdateBundle parameter in the -// result will indicate whether you can currently update your distribution's -// bundle. +// Use this action to update the configuration of your existing distribution. 
// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Lightsail's -// API operation UpdateDistributionBundle for usage and error information. +// API operation UpdateDistribution for usage and error information. +// +// Returned Error Types: +// * ServiceException +// A general service exception. +// +// * InvalidInputException +// Lightsail throws this exception when user input does not conform to the validation +// rules of an input field. +// +// Domain and distribution APIs are only available in the N. Virginia (us-east-1) +// AWS Region. Please set your AWS Region configuration to us-east-1 to create, +// view, or edit these resources. +// +// * NotFoundException +// Lightsail throws this exception when it cannot find a resource. +// +// * OperationFailureException +// Lightsail throws this exception when an operation fails to execute. +// +// * AccessDeniedException +// Lightsail throws this exception when the user cannot be authenticated or +// uses invalid credentials to access a resource. +// +// * UnauthenticatedException +// Lightsail throws this exception when the user has not been authenticated. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/UpdateDistribution +func (c *Lightsail) UpdateDistribution(input *UpdateDistributionInput) (*UpdateDistributionOutput, error) { + req, out := c.UpdateDistributionRequest(input) + return out, req.Send() +} + +// UpdateDistributionWithContext is the same as UpdateDistribution with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateDistribution for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Lightsail) UpdateDistributionWithContext(ctx aws.Context, input *UpdateDistributionInput, opts ...request.Option) (*UpdateDistributionOutput, error) { + req, out := c.UpdateDistributionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateDistributionBundle = "UpdateDistributionBundle" + +// UpdateDistributionBundleRequest generates a "aws/request.Request" representing the +// client's request for the UpdateDistributionBundle operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateDistributionBundle for more information on using the UpdateDistributionBundle +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateDistributionBundleRequest method. 
+// req, resp := client.UpdateDistributionBundleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/UpdateDistributionBundle +func (c *Lightsail) UpdateDistributionBundleRequest(input *UpdateDistributionBundleInput) (req *request.Request, output *UpdateDistributionBundleOutput) { + op := &request.Operation{ + Name: opUpdateDistributionBundle, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateDistributionBundleInput{} + } + + output = &UpdateDistributionBundleOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateDistributionBundle API operation for Amazon Lightsail. +// +// Updates the bundle of your Amazon Lightsail content delivery network (CDN) +// distribution. +// +// A distribution bundle specifies the monthly network transfer quota and monthly +// cost of your dsitribution. +// +// Update your distribution's bundle if your distribution is going over its +// monthly network transfer quota and is incurring an overage fee. +// +// You can update your distribution's bundle only one time within your monthly +// AWS billing cycle. To determine if you can update your distribution's bundle, +// use the GetDistributions action. The ableToUpdateBundle parameter in the +// result will indicate whether you can currently update your distribution's +// bundle. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Lightsail's +// API operation UpdateDistributionBundle for usage and error information. 
// // Returned Error Types: // * ServiceException @@ -14789,7 +15922,7 @@ func (c *Lightsail) UpdateDomainEntryRequest(input *UpdateDomainEntryInput) (req // // The update domain entry operation supports tag-based access control via resource // tags applied to the resource identified by domain name. For more information, -// see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). +// see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -14898,7 +16031,7 @@ func (c *Lightsail) UpdateLoadBalancerAttributeRequest(input *UpdateLoadBalancer // // The update load balancer attribute operation supports tag-based access control // via resource tags applied to the resource identified by load balancer name. -// For more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). +// For more information, see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -15009,7 +16142,7 @@ func (c *Lightsail) UpdateRelationalDatabaseRequest(input *UpdateRelationalDatab // // The update relational database operation supports tag-based access control // via resource tags applied to the resource identified by relationalDatabaseName. -// For more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). 
+// For more information, see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -15124,7 +16257,7 @@ func (c *Lightsail) UpdateRelationalDatabaseParametersRequest(input *UpdateRelat // // The update relational database parameters operation supports tag-based access // control via resource tags applied to the resource identified by relationalDatabaseName. -// For more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). +// For more information, see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-controlling-access-using-tags). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -15247,6 +16380,135 @@ func (s *AccessDeniedException) RequestID() string { return s.RespMetadata.RequestID } +// Describes an access key for an Amazon Lightsail bucket. +// +// Access keys grant full programmatic access to the specified bucket and its +// objects. You can have a maximum of two access keys per bucket. Use the CreateBucketAccessKey +// action to create an access key for a specific bucket. For more information +// about access keys, see Creating access keys for a bucket in Amazon Lightsail +// (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-creating-bucket-access-keys) +// in the Amazon Lightsail Developer Guide. +// +// The secretAccessKey value is returned only in response to the CreateBucketAccessKey +// action. 
You can get a secret access key only when you first create an access +// key; you cannot get the secret access key later. If you lose the secret access +// key, you must create a new access key. +type AccessKey struct { + _ struct{} `type:"structure"` + + // The ID of the access key. + AccessKeyId *string `locationName:"accessKeyId" min:"20" type:"string" sensitive:"true"` + + // The timestamp when the access key was created. + CreatedAt *time.Time `locationName:"createdAt" type:"timestamp"` + + // The secret access key used to sign requests. + // + // You should store the secret access key in a safe location. We recommend that + // you delete the access key if the secret access key is compromised. + SecretAccessKey *string `locationName:"secretAccessKey" type:"string"` + + // The status of the access key. + // + // A status of Active means that the key is valid, while Inactive means it is + // not. + Status *string `locationName:"status" type:"string" enum:"StatusType"` +} + +// String returns the string representation +func (s AccessKey) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccessKey) GoString() string { + return s.String() +} + +// SetAccessKeyId sets the AccessKeyId field's value. +func (s *AccessKey) SetAccessKeyId(v string) *AccessKey { + s.AccessKeyId = &v + return s +} + +// SetCreatedAt sets the CreatedAt field's value. +func (s *AccessKey) SetCreatedAt(v time.Time) *AccessKey { + s.CreatedAt = &v + return s +} + +// SetSecretAccessKey sets the SecretAccessKey field's value. +func (s *AccessKey) SetSecretAccessKey(v string) *AccessKey { + s.SecretAccessKey = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *AccessKey) SetStatus(v string) *AccessKey { + s.Status = &v + return s +} + +// Describes the anonymous access permissions for an Amazon Lightsail bucket +// and its objects. 
+// +// For more information about bucket access permissions, see Understanding bucket +// permissions in Amazon Lightsail (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-understanding-bucket-permissions) +// in the +// +// Amazon Lightsail Developer Guide. +type AccessRules struct { + _ struct{} `type:"structure"` + + // A Boolean value that indicates whether the access control list (ACL) permissions + // that are applied to individual objects override the getObject option that + // is currently specified. + // + // When this is true, you can use the PutObjectAcl (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectAcl.html) + // Amazon S3 API action to set individual objects to public (read-only) using + // the public-read ACL, or to private using the private ACL. + AllowPublicOverrides *bool `locationName:"allowPublicOverrides" type:"boolean"` + + // Specifies the anonymous access to all objects in a bucket. + // + // The following options can be specified: + // + // * public - Sets all objects in the bucket to public (read-only), making + // them readable by anyone in the world. If the getObject value is set to + // public, then all objects in the bucket default to public regardless of + // the allowPublicOverrides value. + // + // * private - Sets all objects in the bucket to private, making them readable + // only by you or anyone you give access to. If the getObject value is set + // to private, and the allowPublicOverrides value is set to true, then all + // objects in the bucket default to private unless they are configured with + // a public-read ACL. Individual objects with a public-read ACL are readable + // by anyone in the world. 
+ GetObject *string `locationName:"getObject" type:"string" enum:"AccessType"` +} + +// String returns the string representation +func (s AccessRules) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccessRules) GoString() string { + return s.String() +} + +// SetAllowPublicOverrides sets the AllowPublicOverrides field's value. +func (s *AccessRules) SetAllowPublicOverrides(v bool) *AccessRules { + s.AllowPublicOverrides = &v + return s +} + +// SetGetObject sets the GetObject field's value. +func (s *AccessRules) SetGetObject(v string) *AccessRules { + s.GetObject = &v + return s +} + // Lightsail throws this exception when an account is still in the setup in // progress state. type AccountSetupInProgressException struct { @@ -15426,8 +16688,8 @@ func (s *AddOnRequest) SetAutoSnapshotAddOnRequest(v *AutoSnapshotAddOnRequest) // Describes an alarm. // -// An alarm is a way to monitor your Amazon Lightsail resource metrics. For -// more information, see Alarms in Amazon Lightsail (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-alarms). +// An alarm is a way to monitor your Lightsail resource metrics. For more information, +// see Alarms in Amazon Lightsail (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-alarms). type Alarm struct { _ struct{} `type:"structure"` @@ -16470,6 +17732,298 @@ func (s *Blueprint) SetVersionCode(v string) *Blueprint { return s } +// Describes an Amazon Lightsail bucket. +type Bucket struct { + _ struct{} `type:"structure"` + + // Indicates whether the bundle that is currently applied to a bucket can be + // changed to another bundle. + // + // You can update a bucket's bundle only one time within a monthly AWS billing + // cycle. + // + // Use the UpdateBucketBundle action to change a bucket's bundle. 
+ AbleToUpdateBundle *bool `locationName:"ableToUpdateBundle" type:"boolean"` + + // An object that describes the access rules of the bucket. + AccessRules *AccessRules `locationName:"accessRules" type:"structure"` + + // The Amazon Resource Name (ARN) of the bucket. + Arn *string `locationName:"arn" type:"string"` + + // The ID of the bundle currently applied to the bucket. + // + // A bucket bundle specifies the monthly cost, storage space, and data transfer + // quota for a bucket. + // + // Use the UpdateBucketBundle action to change the bundle of a bucket. + BundleId *string `locationName:"bundleId" type:"string"` + + // The timestamp when the distribution was created. + CreatedAt *time.Time `locationName:"createdAt" type:"timestamp"` + + // Describes the resource location. + Location *ResourceLocation `locationName:"location" type:"structure"` + + // The name of the bucket. + Name *string `locationName:"name" min:"3" type:"string"` + + // Indicates whether object versioning is enabled for the bucket. + // + // The following options can be configured: + // + // * Enabled - Object versioning is enabled. + // + // * Suspended - Object versioning was previously enabled but is currently + // suspended. Existing object versions are retained. + // + // * NeverEnabled - Object versioning has never been enabled. + ObjectVersioning *string `locationName:"objectVersioning" type:"string"` + + // An array of strings that specify the AWS account IDs that have read-only + // access to the bucket. + ReadonlyAccessAccounts []*string `locationName:"readonlyAccessAccounts" type:"list"` + + // The Lightsail resource type of the bucket (for example, Bucket). + ResourceType *string `locationName:"resourceType" type:"string"` + + // An array of objects that describe Lightsail instances that have access to + // the bucket. + // + // Use the SetResourceAccessForBucket action to update the instances that have + // access to a bucket. 
+ ResourcesReceivingAccess []*ResourceReceivingAccess `locationName:"resourcesReceivingAccess" type:"list"` + + // An object that describes the state of the bucket. + State *BucketState `locationName:"state" type:"structure"` + + // The support code for a bucket. Include this code in your email to support + // when you have questions about a Lightsail bucket. This code enables our support + // team to look up your Lightsail information more easily. + SupportCode *string `locationName:"supportCode" type:"string"` + + // The tag keys and optional values for the bucket. For more information, see + // Tags in Amazon Lightsail (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-tags) + // in the Amazon Lightsail Developer Guide. + Tags []*Tag `locationName:"tags" type:"list"` + + // The URL of the bucket. + Url *string `locationName:"url" type:"string"` +} + +// String returns the string representation +func (s Bucket) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Bucket) GoString() string { + return s.String() +} + +// SetAbleToUpdateBundle sets the AbleToUpdateBundle field's value. +func (s *Bucket) SetAbleToUpdateBundle(v bool) *Bucket { + s.AbleToUpdateBundle = &v + return s +} + +// SetAccessRules sets the AccessRules field's value. +func (s *Bucket) SetAccessRules(v *AccessRules) *Bucket { + s.AccessRules = v + return s +} + +// SetArn sets the Arn field's value. +func (s *Bucket) SetArn(v string) *Bucket { + s.Arn = &v + return s +} + +// SetBundleId sets the BundleId field's value. +func (s *Bucket) SetBundleId(v string) *Bucket { + s.BundleId = &v + return s +} + +// SetCreatedAt sets the CreatedAt field's value. +func (s *Bucket) SetCreatedAt(v time.Time) *Bucket { + s.CreatedAt = &v + return s +} + +// SetLocation sets the Location field's value. +func (s *Bucket) SetLocation(v *ResourceLocation) *Bucket { + s.Location = v + return s +} + +// SetName sets the Name field's value. 
+func (s *Bucket) SetName(v string) *Bucket { + s.Name = &v + return s +} + +// SetObjectVersioning sets the ObjectVersioning field's value. +func (s *Bucket) SetObjectVersioning(v string) *Bucket { + s.ObjectVersioning = &v + return s +} + +// SetReadonlyAccessAccounts sets the ReadonlyAccessAccounts field's value. +func (s *Bucket) SetReadonlyAccessAccounts(v []*string) *Bucket { + s.ReadonlyAccessAccounts = v + return s +} + +// SetResourceType sets the ResourceType field's value. +func (s *Bucket) SetResourceType(v string) *Bucket { + s.ResourceType = &v + return s +} + +// SetResourcesReceivingAccess sets the ResourcesReceivingAccess field's value. +func (s *Bucket) SetResourcesReceivingAccess(v []*ResourceReceivingAccess) *Bucket { + s.ResourcesReceivingAccess = v + return s +} + +// SetState sets the State field's value. +func (s *Bucket) SetState(v *BucketState) *Bucket { + s.State = v + return s +} + +// SetSupportCode sets the SupportCode field's value. +func (s *Bucket) SetSupportCode(v string) *Bucket { + s.SupportCode = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *Bucket) SetTags(v []*Tag) *Bucket { + s.Tags = v + return s +} + +// SetUrl sets the Url field's value. +func (s *Bucket) SetUrl(v string) *Bucket { + s.Url = &v + return s +} + +// Describes the specifications of a bundle that can be applied to an Amazon +// Lightsail bucket. +// +// A bucket bundle specifies the monthly cost, storage space, and data transfer +// quota for a bucket. +type BucketBundle struct { + _ struct{} `type:"structure"` + + // The ID of the bundle. + BundleId *string `locationName:"bundleId" type:"string"` + + // Indicates whether the bundle is active. Use for a new or existing bucket. + IsActive *bool `locationName:"isActive" type:"boolean"` + + // The name of the bundle. + Name *string `locationName:"name" type:"string"` + + // The monthly price of the bundle, in US dollars. 
+ Price *float64 `locationName:"price" type:"float"` + + // The storage size of the bundle, in GB. + StoragePerMonthInGb *int64 `locationName:"storagePerMonthInGb" type:"integer"` + + // The monthly network transfer quota of the bundle. + TransferPerMonthInGb *int64 `locationName:"transferPerMonthInGb" type:"integer"` +} + +// String returns the string representation +func (s BucketBundle) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BucketBundle) GoString() string { + return s.String() +} + +// SetBundleId sets the BundleId field's value. +func (s *BucketBundle) SetBundleId(v string) *BucketBundle { + s.BundleId = &v + return s +} + +// SetIsActive sets the IsActive field's value. +func (s *BucketBundle) SetIsActive(v bool) *BucketBundle { + s.IsActive = &v + return s +} + +// SetName sets the Name field's value. +func (s *BucketBundle) SetName(v string) *BucketBundle { + s.Name = &v + return s +} + +// SetPrice sets the Price field's value. +func (s *BucketBundle) SetPrice(v float64) *BucketBundle { + s.Price = &v + return s +} + +// SetStoragePerMonthInGb sets the StoragePerMonthInGb field's value. +func (s *BucketBundle) SetStoragePerMonthInGb(v int64) *BucketBundle { + s.StoragePerMonthInGb = &v + return s +} + +// SetTransferPerMonthInGb sets the TransferPerMonthInGb field's value. +func (s *BucketBundle) SetTransferPerMonthInGb(v int64) *BucketBundle { + s.TransferPerMonthInGb = &v + return s +} + +// Describes the state of an Amazon Lightsail bucket. +type BucketState struct { + _ struct{} `type:"structure"` + + // The state code of the bucket. + // + // The following codes are possible: + // + // * OK - The bucket is in a running state. + // + // * Unknown - Creation of the bucket might have timed-out. You might want + // to delete the bucket and create a new one. + Code *string `locationName:"code" type:"string"` + + // A message that describes the state of the bucket. 
+ Message *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s BucketState) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BucketState) GoString() string { + return s.String() +} + +// SetCode sets the Code field's value. +func (s *BucketState) SetCode(v string) *BucketState { + s.Code = &v + return s +} + +// SetMessage sets the Message field's value. +func (s *BucketState) SetMessage(v string) *BucketState { + s.Message = &v + return s +} + // Describes a bundle, which is a set of specs describing your virtual private // server (or instance). type Bundle struct { @@ -16949,7 +18503,7 @@ type Certificate struct { SupportCode *string `locationName:"supportCode" type:"string"` // The tag keys and optional values for the resource. For more information about - // tags in Lightsail, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-tags). + // tags in Lightsail, see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-tags). Tags []*Tag `locationName:"tags" type:"list"` } @@ -17106,7 +18660,7 @@ type CertificateSummary struct { DomainName *string `locationName:"domainName" type:"string"` // The tag keys and optional values for the resource. For more information about - // tags in Lightsail, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-tags). + // tags in Lightsail, see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-tags). Tags []*Tag `locationName:"tags" type:"list"` } @@ -17233,7 +18787,7 @@ func (s *CloseInstancePublicPortsOutput) SetOperation(v *Operation) *CloseInstan } // Describes a CloudFormation stack record created as a result of the create -// cloud formation stack operation. +// cloud formation stack action. 
// // A CloudFormation stack record provides information about the AWS CloudFormation // stack used to create a new Amazon Elastic Compute Cloud instance from an @@ -17692,7 +19246,7 @@ type ContainerService struct { StateDetail *ContainerServiceStateDetail `locationName:"stateDetail" type:"structure"` // The tag keys and optional values for the resource. For more information about - // tags in Lightsail, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-tags). + // tags in Lightsail, see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-tags). Tags []*Tag `locationName:"tags" type:"list"` // The publicly accessible URL of the container service. @@ -18014,7 +19568,8 @@ type ContainerServiceHealthCheckConfig struct { Path *string `locationName:"path" type:"string"` // The HTTP codes to use when checking for a successful response from a container. - // You can specify values between 200 and 499. + // You can specify values between 200 and 499. You can specify multiple values + // (for example, 200,202) or a range of values (for example, 200-299). SuccessCodes *string `locationName:"successCodes" type:"string"` // The amount of time, in seconds, during which no response means a failed health @@ -18349,7 +19904,8 @@ type CopySnapshotInput struct { // snapshot parameters are mutually exclusive. // // * Define this parameter only when copying an automatic snapshot as a manual - // snapshot. For more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-keeping-automatic-snapshots). + // snapshot. For more information, see the Amazon Lightsail Developer Guide + // (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-keeping-automatic-snapshots). RestoreDate *string `locationName:"restoreDate" type:"string"` // The AWS Region where the source manual or automatic snapshot is located. 
@@ -18363,7 +19919,8 @@ type CopySnapshotInput struct { // Constraint: // // * Define this parameter only when copying an automatic snapshot as a manual - // snapshot. For more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-keeping-automatic-snapshots). + // snapshot. For more information, see the Amazon Lightsail Developer Guide + // (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-keeping-automatic-snapshots). SourceResourceName *string `locationName:"sourceResourceName" type:"string"` // The name of the source manual snapshot to copy. @@ -18389,7 +19946,8 @@ type CopySnapshotInput struct { // mutually exclusive. // // * Define this parameter only when copying an automatic snapshot as a manual - // snapshot. For more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-keeping-automatic-snapshots). + // snapshot. For more information, see the Amazon Lightsail Developer Guide + // (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-keeping-automatic-snapshots). UseLatestRestorableAutoSnapshot *bool `locationName:"useLatestRestorableAutoSnapshot" type:"boolean"` } @@ -18480,6 +20038,209 @@ func (s *CopySnapshotOutput) SetOperations(v []*Operation) *CopySnapshotOutput { return s } +type CreateBucketAccessKeyInput struct { + _ struct{} `type:"structure"` + + // The name of the bucket that the new access key will belong to, and grant + // access to. 
+ // + // BucketName is a required field + BucketName *string `locationName:"bucketName" min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateBucketAccessKeyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateBucketAccessKeyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateBucketAccessKeyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateBucketAccessKeyInput"} + if s.BucketName == nil { + invalidParams.Add(request.NewErrParamRequired("BucketName")) + } + if s.BucketName != nil && len(*s.BucketName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("BucketName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucketName sets the BucketName field's value. +func (s *CreateBucketAccessKeyInput) SetBucketName(v string) *CreateBucketAccessKeyInput { + s.BucketName = &v + return s +} + +type CreateBucketAccessKeyOutput struct { + _ struct{} `type:"structure"` + + // An object that describes the access key that is created. + AccessKey *AccessKey `locationName:"accessKey" type:"structure"` + + // An array of objects that describe the result of the action, such as the status + // of the request, the timestamp of the request, and the resources affected + // by the request. + Operations []*Operation `locationName:"operations" type:"list"` +} + +// String returns the string representation +func (s CreateBucketAccessKeyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateBucketAccessKeyOutput) GoString() string { + return s.String() +} + +// SetAccessKey sets the AccessKey field's value. 
+func (s *CreateBucketAccessKeyOutput) SetAccessKey(v *AccessKey) *CreateBucketAccessKeyOutput { + s.AccessKey = v + return s +} + +// SetOperations sets the Operations field's value. +func (s *CreateBucketAccessKeyOutput) SetOperations(v []*Operation) *CreateBucketAccessKeyOutput { + s.Operations = v + return s +} + +type CreateBucketInput struct { + _ struct{} `type:"structure"` + + // The name for the bucket. + // + // For more information about bucket names, see Bucket naming rules in Amazon + // Lightsail (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/bucket-naming-rules-in-amazon-lightsail) + // in the Amazon Lightsail Developer Guide. + // + // BucketName is a required field + BucketName *string `locationName:"bucketName" min:"3" type:"string" required:"true"` + + // The ID of the bundle to use for the bucket. + // + // A bucket bundle specifies the monthly cost, storage space, and data transfer + // quota for a bucket. + // + // Use the GetBucketBundles action to get a list of bundle IDs that you can + // specify. + // + // Use the UpdateBucketBundle action to change the bundle after the bucket is + // created. + // + // BundleId is a required field + BundleId *string `locationName:"bundleId" type:"string" required:"true"` + + // A Boolean value that indicates whether to enable versioning of objects in + // the bucket. + // + // For more information about versioning, see Enabling and suspending bucket + // object versioning in Amazon Lightsail (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-managing-bucket-object-versioning) + // in the Amazon Lightsail Developer Guide. + EnableObjectVersioning *bool `locationName:"enableObjectVersioning" type:"boolean"` + + // The tag keys and optional values to add to the bucket during creation. + // + // Use the TagResource action to tag the bucket after it's created. 
+ Tags []*Tag `locationName:"tags" type:"list"` +} + +// String returns the string representation +func (s CreateBucketInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateBucketInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateBucketInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateBucketInput"} + if s.BucketName == nil { + invalidParams.Add(request.NewErrParamRequired("BucketName")) + } + if s.BucketName != nil && len(*s.BucketName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("BucketName", 3)) + } + if s.BundleId == nil { + invalidParams.Add(request.NewErrParamRequired("BundleId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucketName sets the BucketName field's value. +func (s *CreateBucketInput) SetBucketName(v string) *CreateBucketInput { + s.BucketName = &v + return s +} + +// SetBundleId sets the BundleId field's value. +func (s *CreateBucketInput) SetBundleId(v string) *CreateBucketInput { + s.BundleId = &v + return s +} + +// SetEnableObjectVersioning sets the EnableObjectVersioning field's value. +func (s *CreateBucketInput) SetEnableObjectVersioning(v bool) *CreateBucketInput { + s.EnableObjectVersioning = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *CreateBucketInput) SetTags(v []*Tag) *CreateBucketInput { + s.Tags = v + return s +} + +type CreateBucketOutput struct { + _ struct{} `type:"structure"` + + // An object that describes the bucket that is created. + Bucket *Bucket `locationName:"bucket" type:"structure"` + + // An array of objects that describe the result of the action, such as the status + // of the request, the timestamp of the request, and the resources affected + // by the request. 
+ Operations []*Operation `locationName:"operations" type:"list"` +} + +// String returns the string representation +func (s CreateBucketOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateBucketOutput) GoString() string { + return s.String() +} + +// SetBucket sets the Bucket field's value. +func (s *CreateBucketOutput) SetBucket(v *Bucket) *CreateBucketOutput { + s.Bucket = v + return s +} + +// SetOperations sets the Operations field's value. +func (s *CreateBucketOutput) SetOperations(v []*Operation) *CreateBucketOutput { + s.Operations = v + return s +} + type CreateCertificateInput struct { _ struct{} `type:"structure"` @@ -18945,10 +20706,12 @@ type CreateContainerServiceInput struct { // ServiceName is a required field ServiceName *string `locationName:"serviceName" min:"1" type:"string" required:"true"` - // The tag keys and optional values for the container service. + // The tag keys and optional values to add to the certificate during create. // - // For more information about tags in Lightsail, see the Lightsail Dev Guide - // (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-tags). + // Use the TagResource action to tag a resource after it's created. + // + // For more information about tags in Lightsail, see the Amazon Lightsail Developer + // Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-tags). Tags []*Tag `locationName:"tags" type:"list"` } @@ -19132,7 +20895,8 @@ type CreateDiskFromSnapshotInput struct { // snapshot parameters are mutually exclusive. // // * Define this parameter only when creating a new disk from an automatic - // snapshot. For more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-configuring-automatic-snapshots). + // snapshot. 
For more information, see the Amazon Lightsail Developer Guide + // (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-configuring-automatic-snapshots). RestoreDate *string `locationName:"restoreDate" type:"string"` // The size of the disk in GB (e.g., 32). @@ -19150,7 +20914,8 @@ type CreateDiskFromSnapshotInput struct { // mutually exclusive. // // * Define this parameter only when creating a new disk from an automatic - // snapshot. For more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-configuring-automatic-snapshots). + // snapshot. For more information, see the Amazon Lightsail Developer Guide + // (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-configuring-automatic-snapshots). SourceDiskName *string `locationName:"sourceDiskName" type:"string"` // The tag keys and optional values to add to the resource during create. @@ -19168,7 +20933,8 @@ type CreateDiskFromSnapshotInput struct { // mutually exclusive. // // * Define this parameter only when creating a new disk from an automatic - // snapshot. For more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-configuring-automatic-snapshots). + // snapshot. For more information, see the Amazon Lightsail Developer Guide + // (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-configuring-automatic-snapshots). UseLatestRestorableAutoSnapshot *bool `locationName:"useLatestRestorableAutoSnapshot" type:"boolean"` } @@ -19990,7 +21756,8 @@ type CreateInstancesFromSnapshotInput struct { // snapshot parameters are mutually exclusive. // // * Define this parameter only when creating a new instance from an automatic - // snapshot. For more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-configuring-automatic-snapshots). + // snapshot. 
For more information, see the Amazon Lightsail Developer Guide + // (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-configuring-automatic-snapshots). RestoreDate *string `locationName:"restoreDate" type:"string"` // The name of the source instance from which the source automatic snapshot @@ -20003,7 +21770,8 @@ type CreateInstancesFromSnapshotInput struct { // are mutually exclusive. // // * Define this parameter only when creating a new instance from an automatic - // snapshot. For more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-configuring-automatic-snapshots). + // snapshot. For more information, see the Amazon Lightsail Developer Guide + // (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-configuring-automatic-snapshots). SourceInstanceName *string `locationName:"sourceInstanceName" type:"string"` // The tag keys and optional values to add to the resource during create. @@ -20021,7 +21789,8 @@ type CreateInstancesFromSnapshotInput struct { // mutually exclusive. // // * Define this parameter only when creating a new instance from an automatic - // snapshot. For more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-configuring-automatic-snapshots). + // snapshot. For more information, see the Amazon Lightsail Developer Guide + // (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-configuring-automatic-snapshots). UseLatestRestorableAutoSnapshot *bool `locationName:"useLatestRestorableAutoSnapshot" type:"boolean"` // You can create a launch script that configures a server with additional user @@ -20029,8 +21798,8 @@ type CreateInstancesFromSnapshotInput struct { // // Depending on the machine image you choose, the command to get software on // your instance varies. Amazon Linux and CentOS use yum, Debian and Ubuntu - // use apt-get, and FreeBSD uses pkg. 
For a complete list, see the Dev Guide - // (https://lightsail.aws.amazon.com/ls/docs/getting-started/article/compare-options-choose-lightsail-instance-image). + // use apt-get, and FreeBSD uses pkg. For a complete list, see the Amazon Lightsail + // Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/compare-options-choose-lightsail-instance-image). UserData *string `locationName:"userData" type:"string"` } @@ -20243,8 +22012,8 @@ type CreateInstancesInput struct { // // Depending on the machine image you choose, the command to get software on // your instance varies. Amazon Linux and CentOS use yum, Debian and Ubuntu - // use apt-get, and FreeBSD uses pkg. For a complete list, see the Dev Guide - // (https://lightsail.aws.amazon.com/ls/docs/getting-started/article/compare-options-choose-lightsail-instance-image). + // use apt-get, and FreeBSD uses pkg. For a complete list, see the Amazon Lightsail + // Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/compare-options-choose-lightsail-instance-image). UserData *string `locationName:"userData" type:"string"` } @@ -21472,6 +23241,181 @@ func (s *DeleteAutoSnapshotOutput) SetOperations(v []*Operation) *DeleteAutoSnap return s } +type DeleteBucketAccessKeyInput struct { + _ struct{} `type:"structure"` + + // The ID of the access key to delete. + // + // Use the GetBucketAccessKeys action to get a list of access key IDs that you + // can specify. + // + // AccessKeyId is a required field + AccessKeyId *string `locationName:"accessKeyId" type:"string" required:"true"` + + // The name of the bucket that the access key belongs to. 
+ // + // BucketName is a required field + BucketName *string `locationName:"bucketName" min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketAccessKeyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketAccessKeyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketAccessKeyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketAccessKeyInput"} + if s.AccessKeyId == nil { + invalidParams.Add(request.NewErrParamRequired("AccessKeyId")) + } + if s.BucketName == nil { + invalidParams.Add(request.NewErrParamRequired("BucketName")) + } + if s.BucketName != nil && len(*s.BucketName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("BucketName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccessKeyId sets the AccessKeyId field's value. +func (s *DeleteBucketAccessKeyInput) SetAccessKeyId(v string) *DeleteBucketAccessKeyInput { + s.AccessKeyId = &v + return s +} + +// SetBucketName sets the BucketName field's value. +func (s *DeleteBucketAccessKeyInput) SetBucketName(v string) *DeleteBucketAccessKeyInput { + s.BucketName = &v + return s +} + +type DeleteBucketAccessKeyOutput struct { + _ struct{} `type:"structure"` + + // An array of objects that describe the result of the action, such as the status + // of the request, the timestamp of the request, and the resources affected + // by the request. 
+ Operations []*Operation `locationName:"operations" type:"list"` +} + +// String returns the string representation +func (s DeleteBucketAccessKeyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketAccessKeyOutput) GoString() string { + return s.String() +} + +// SetOperations sets the Operations field's value. +func (s *DeleteBucketAccessKeyOutput) SetOperations(v []*Operation) *DeleteBucketAccessKeyOutput { + s.Operations = v + return s +} + +type DeleteBucketInput struct { + _ struct{} `type:"structure"` + + // The name of the bucket to delete. + // + // Use the GetBuckets action to get a list of bucket names that you can specify. + // + // BucketName is a required field + BucketName *string `locationName:"bucketName" min:"3" type:"string" required:"true"` + + // A Boolean value that indicates whether to force delete the bucket. + // + // You must force delete the bucket if it has one of the following conditions: + // + // * The bucket is the origin of a distribution. + // + // * The bucket has instances that were granted access to it using the SetResourceAccessForBucket + // action. + // + // * The bucket has objects. + // + // * The bucket has access keys. + // + // Force deleting a bucket might impact other resources that rely on the bucket, + // such as instances, distributions, or software that use the issued access + // keys. + ForceDelete *bool `locationName:"forceDelete" type:"boolean"` +} + +// String returns the string representation +func (s DeleteBucketInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteBucketInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketInput"} + if s.BucketName == nil { + invalidParams.Add(request.NewErrParamRequired("BucketName")) + } + if s.BucketName != nil && len(*s.BucketName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("BucketName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucketName sets the BucketName field's value. +func (s *DeleteBucketInput) SetBucketName(v string) *DeleteBucketInput { + s.BucketName = &v + return s +} + +// SetForceDelete sets the ForceDelete field's value. +func (s *DeleteBucketInput) SetForceDelete(v bool) *DeleteBucketInput { + s.ForceDelete = &v + return s +} + +type DeleteBucketOutput struct { + _ struct{} `type:"structure"` + + // An array of objects that describe the result of the action, such as the status + // of the request, the timestamp of the request, and the resources affected + // by the request. + Operations []*Operation `locationName:"operations" type:"list"` +} + +// String returns the string representation +func (s DeleteBucketOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketOutput) GoString() string { + return s.String() +} + +// SetOperations sets the Operations field's value. +func (s *DeleteBucketOutput) SetOperations(v []*Operation) *DeleteBucketOutput { + s.Operations = v + return s +} + type DeleteCertificateInput struct { _ struct{} `type:"structure"` @@ -23022,7 +24966,7 @@ func (s *DisableAddOnOutput) SetOperations(v []*Operation) *DisableAddOnOutput { return s } -// Describes a system disk or a block storage disk. +// Describes a block storage disk. type Disk struct { _ struct{} `type:"structure"` @@ -23089,7 +25033,7 @@ type Disk struct { SupportCode *string `locationName:"supportCode" type:"string"` // The tag keys and optional values for the resource. 
For more information about - // tags in Lightsail, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-tags). + // tags in Lightsail, see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-tags). Tags []*Tag `locationName:"tags" type:"list"` } @@ -23343,7 +25287,7 @@ type DiskSnapshot struct { SupportCode *string `locationName:"supportCode" type:"string"` // The tag keys and optional values for the resource. For more information about - // tags in Lightsail, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-tags). + // tags in Lightsail, see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-tags). Tags []*Tag `locationName:"tags" type:"list"` } @@ -23478,7 +25422,8 @@ type DistributionBundle struct { // The ID of the bundle. BundleId *string `locationName:"bundleId" type:"string"` - // Indicates whether the bundle is active, and can be specified for a new distribution. + // Indicates whether the bundle is active, and can be specified for a new or + // existing distribution. IsActive *bool `locationName:"isActive" type:"boolean"` // The name of the distribution bundle. @@ -23531,7 +25476,7 @@ func (s *DistributionBundle) SetTransferPerMonthInGb(v int64) *DistributionBundl return s } -// Describes a domain where you are storing recordsets in Lightsail. +// Describes a domain where you are storing recordsets. type Domain struct { _ struct{} `type:"structure"` @@ -23559,7 +25504,7 @@ type Domain struct { SupportCode *string `locationName:"supportCode" type:"string"` // The tag keys and optional values for the resource. For more information about - // tags in Lightsail, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-tags). 
+ // tags in Lightsail, see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-tags). Tags []*Tag `locationName:"tags" type:"list"` } @@ -24494,6 +26439,419 @@ func (s *GetBlueprintsOutput) SetNextPageToken(v string) *GetBlueprintsOutput { return s } +type GetBucketAccessKeysInput struct { + _ struct{} `type:"structure"` + + // The name of the bucket for which to return access keys. + // + // BucketName is a required field + BucketName *string `locationName:"bucketName" min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketAccessKeysInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketAccessKeysInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketAccessKeysInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketAccessKeysInput"} + if s.BucketName == nil { + invalidParams.Add(request.NewErrParamRequired("BucketName")) + } + if s.BucketName != nil && len(*s.BucketName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("BucketName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucketName sets the BucketName field's value. +func (s *GetBucketAccessKeysInput) SetBucketName(v string) *GetBucketAccessKeysInput { + s.BucketName = &v + return s +} + +type GetBucketAccessKeysOutput struct { + _ struct{} `type:"structure"` + + // An object that describes the access keys for the specified bucket. 
+ AccessKeys []*AccessKey `locationName:"accessKeys" type:"list"` +} + +// String returns the string representation +func (s GetBucketAccessKeysOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketAccessKeysOutput) GoString() string { + return s.String() +} + +// SetAccessKeys sets the AccessKeys field's value. +func (s *GetBucketAccessKeysOutput) SetAccessKeys(v []*AccessKey) *GetBucketAccessKeysOutput { + s.AccessKeys = v + return s +} + +type GetBucketBundlesInput struct { + _ struct{} `type:"structure"` + + // A Boolean value that indicates whether to include inactive (unavailable) + // bundles in the response. + IncludeInactive *bool `locationName:"includeInactive" type:"boolean"` +} + +// String returns the string representation +func (s GetBucketBundlesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketBundlesInput) GoString() string { + return s.String() +} + +// SetIncludeInactive sets the IncludeInactive field's value. +func (s *GetBucketBundlesInput) SetIncludeInactive(v bool) *GetBucketBundlesInput { + s.IncludeInactive = &v + return s +} + +type GetBucketBundlesOutput struct { + _ struct{} `type:"structure"` + + // An object that describes bucket bundles. + Bundles []*BucketBundle `locationName:"bundles" type:"list"` +} + +// String returns the string representation +func (s GetBucketBundlesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketBundlesOutput) GoString() string { + return s.String() +} + +// SetBundles sets the Bundles field's value. +func (s *GetBucketBundlesOutput) SetBundles(v []*BucketBundle) *GetBucketBundlesOutput { + s.Bundles = v + return s +} + +type GetBucketMetricDataInput struct { + _ struct{} `type:"structure"` + + // The name of the bucket for which to get metric data. 
+ // + // BucketName is a required field + BucketName *string `locationName:"bucketName" min:"3" type:"string" required:"true"` + + // The timestamp indicating the latest data to be returned. + // + // EndTime is a required field + EndTime *time.Time `locationName:"endTime" type:"timestamp" required:"true"` + + // The metric for which you want to return information. + // + // Valid bucket metric names are listed below, along with the most useful statistics + // to include in your request, and the published unit value. + // + // These bucket metrics are reported once per day. + // + // * BucketSizeBytes - The amount of data in bytes stored in a bucket. This + // value is calculated by summing the size of all objects in the bucket (including + // object versions), including the size of all parts for all incomplete multipart + // uploads to the bucket. Statistics: The most useful statistic is Maximum. + // Unit: The published unit is Bytes. + // + // * NumberOfObjects - The total number of objects stored in a bucket. This + // value is calculated by counting all objects in the bucket (including object + // versions) and the total number of parts for all incomplete multipart uploads + // to the bucket. Statistics: The most useful statistic is Average. Unit: + // The published unit is Count. + // + // MetricName is a required field + MetricName *string `locationName:"metricName" type:"string" required:"true" enum:"BucketMetricName"` + + // The granularity, in seconds, of the returned data points. + // + // Bucket storage metrics are reported once per day. Therefore, you should specify + // a period of 86400 seconds, which is the number of seconds in a day. + // + // Period is a required field + Period *int64 `locationName:"period" min:"60" type:"integer" required:"true"` + + // The timestamp indicating the earliest data to be returned. 
+ // + // StartTime is a required field + StartTime *time.Time `locationName:"startTime" type:"timestamp" required:"true"` + + // The statistic for the metric. + // + // The following statistics are available: + // + // * Minimum - The lowest value observed during the specified period. Use + // this value to determine low volumes of activity for your application. + // + // * Maximum - The highest value observed during the specified period. Use + // this value to determine high volumes of activity for your application. + // + // * Sum - The sum of all values submitted for the matching metric. You can + // use this statistic to determine the total volume of a metric. + // + // * Average - The value of Sum / SampleCount during the specified period. + // By comparing this statistic with the Minimum and Maximum values, you can + // determine the full scope of a metric and how close the average use is + // to the Minimum and Maximum values. This comparison helps you to know when + // to increase or decrease your resources. + // + // * SampleCount - The count, or number, of data points used for the statistical + // calculation. + // + // Statistics is a required field + Statistics []*string `locationName:"statistics" type:"list" required:"true"` + + // The unit for the metric data request. + // + // Valid units depend on the metric data being requested. For the valid units + // with each available metric, see the metricName parameter. + // + // Unit is a required field + Unit *string `locationName:"unit" type:"string" required:"true" enum:"MetricUnit"` +} + +// String returns the string representation +func (s GetBucketMetricDataInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketMetricDataInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GetBucketMetricDataInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketMetricDataInput"} + if s.BucketName == nil { + invalidParams.Add(request.NewErrParamRequired("BucketName")) + } + if s.BucketName != nil && len(*s.BucketName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("BucketName", 3)) + } + if s.EndTime == nil { + invalidParams.Add(request.NewErrParamRequired("EndTime")) + } + if s.MetricName == nil { + invalidParams.Add(request.NewErrParamRequired("MetricName")) + } + if s.Period == nil { + invalidParams.Add(request.NewErrParamRequired("Period")) + } + if s.Period != nil && *s.Period < 60 { + invalidParams.Add(request.NewErrParamMinValue("Period", 60)) + } + if s.StartTime == nil { + invalidParams.Add(request.NewErrParamRequired("StartTime")) + } + if s.Statistics == nil { + invalidParams.Add(request.NewErrParamRequired("Statistics")) + } + if s.Unit == nil { + invalidParams.Add(request.NewErrParamRequired("Unit")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucketName sets the BucketName field's value. +func (s *GetBucketMetricDataInput) SetBucketName(v string) *GetBucketMetricDataInput { + s.BucketName = &v + return s +} + +// SetEndTime sets the EndTime field's value. +func (s *GetBucketMetricDataInput) SetEndTime(v time.Time) *GetBucketMetricDataInput { + s.EndTime = &v + return s +} + +// SetMetricName sets the MetricName field's value. +func (s *GetBucketMetricDataInput) SetMetricName(v string) *GetBucketMetricDataInput { + s.MetricName = &v + return s +} + +// SetPeriod sets the Period field's value. +func (s *GetBucketMetricDataInput) SetPeriod(v int64) *GetBucketMetricDataInput { + s.Period = &v + return s +} + +// SetStartTime sets the StartTime field's value. +func (s *GetBucketMetricDataInput) SetStartTime(v time.Time) *GetBucketMetricDataInput { + s.StartTime = &v + return s +} + +// SetStatistics sets the Statistics field's value. 
+func (s *GetBucketMetricDataInput) SetStatistics(v []*string) *GetBucketMetricDataInput { + s.Statistics = v + return s +} + +// SetUnit sets the Unit field's value. +func (s *GetBucketMetricDataInput) SetUnit(v string) *GetBucketMetricDataInput { + s.Unit = &v + return s +} + +type GetBucketMetricDataOutput struct { + _ struct{} `type:"structure"` + + // An array of objects that describe the metric data returned. + MetricData []*MetricDatapoint `locationName:"metricData" type:"list"` + + // The name of the metric returned. + MetricName *string `locationName:"metricName" type:"string" enum:"BucketMetricName"` +} + +// String returns the string representation +func (s GetBucketMetricDataOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketMetricDataOutput) GoString() string { + return s.String() +} + +// SetMetricData sets the MetricData field's value. +func (s *GetBucketMetricDataOutput) SetMetricData(v []*MetricDatapoint) *GetBucketMetricDataOutput { + s.MetricData = v + return s +} + +// SetMetricName sets the MetricName field's value. +func (s *GetBucketMetricDataOutput) SetMetricName(v string) *GetBucketMetricDataOutput { + s.MetricName = &v + return s +} + +type GetBucketsInput struct { + _ struct{} `type:"structure"` + + // The name of the bucket for which to return information. + // + // When omitted, the response includes all of your buckets in the AWS Region + // where the request is made. + BucketName *string `locationName:"bucketName" min:"3" type:"string"` + + // A Boolean value that indicates whether to include Lightsail instances that + // were given access to the bucket using the SetResourceAccessForBucket action. + IncludeConnectedResources *bool `locationName:"includeConnectedResources" type:"boolean"` + + // The token to advance to the next page of results from your request. + // + // To get a page token, perform an initial GetBuckets request. 
If your results + // are paginated, the response will return a next page token that you can specify + // as the page token in a subsequent request. + PageToken *string `locationName:"pageToken" type:"string"` +} + +// String returns the string representation +func (s GetBucketsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketsInput"} + if s.BucketName != nil && len(*s.BucketName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("BucketName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucketName sets the BucketName field's value. +func (s *GetBucketsInput) SetBucketName(v string) *GetBucketsInput { + s.BucketName = &v + return s +} + +// SetIncludeConnectedResources sets the IncludeConnectedResources field's value. +func (s *GetBucketsInput) SetIncludeConnectedResources(v bool) *GetBucketsInput { + s.IncludeConnectedResources = &v + return s +} + +// SetPageToken sets the PageToken field's value. +func (s *GetBucketsInput) SetPageToken(v string) *GetBucketsInput { + s.PageToken = &v + return s +} + +type GetBucketsOutput struct { + _ struct{} `type:"structure"` + + // An array of objects that describe buckets. + Buckets []*Bucket `locationName:"buckets" type:"list"` + + // The token to advance to the next page of results from your request. + // + // A next page token is not returned if there are no more results to display. + // + // To get the next page of results, perform another GetBuckets request and specify + // the next page token using the pageToken parameter. 
+ NextPageToken *string `locationName:"nextPageToken" type:"string"` +} + +// String returns the string representation +func (s GetBucketsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketsOutput) GoString() string { + return s.String() +} + +// SetBuckets sets the Buckets field's value. +func (s *GetBucketsOutput) SetBuckets(v []*Bucket) *GetBucketsOutput { + s.Buckets = v + return s +} + +// SetNextPageToken sets the NextPageToken field's value. +func (s *GetBucketsOutput) SetNextPageToken(v string) *GetBucketsOutput { + s.NextPageToken = &v + return s +} + type GetBundlesInput struct { _ struct{} `type:"structure"` @@ -25973,9 +28331,6 @@ type GetDistributionsInput struct { // The name of the distribution for which to return information. // - // Use the GetDistributions action to get a list of distribution names that - // you can specify. - // // When omitted, the response includes all of your distributions in the AWS // Region where the request is made. DistributionName *string `locationName:"distributionName" type:"string"` @@ -29360,7 +31715,7 @@ type Instance struct { SupportCode *string `locationName:"supportCode" type:"string"` // The tag keys and optional values for the resource. For more information about - // tags in Lightsail, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-tags). + // tags in Lightsail, see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-tags). Tags []*Tag `locationName:"tags" type:"list"` // The user name for connecting to the instance (e.g., ec2-user). @@ -30312,7 +32667,7 @@ type InstanceSnapshot struct { SupportCode *string `locationName:"supportCode" type:"string"` // The tag keys and optional values for the resource. 
For more information about - // tags in Lightsail, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-tags). + // tags in Lightsail, see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-tags). Tags []*Tag `locationName:"tags" type:"list"` } @@ -30601,7 +32956,7 @@ func (s *IsVpcPeeredOutput) SetIsPeered(v bool) *IsVpcPeeredOutput { return s } -// Describes the SSH key pair. +// Describes an SSH key pair. type KeyPair struct { _ struct{} `type:"structure"` @@ -30629,7 +32984,7 @@ type KeyPair struct { SupportCode *string `locationName:"supportCode" type:"string"` // The tag keys and optional values for the resource. For more information about - // tags in Lightsail, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-tags). + // tags in Lightsail, see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-tags). Tags []*Tag `locationName:"tags" type:"list"` } @@ -30769,7 +33124,7 @@ type LightsailDistribution struct { SupportCode *string `locationName:"supportCode" type:"string"` // The tag keys and optional values for the resource. For more information about - // tags in Lightsail, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-tags). + // tags in Lightsail, see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-tags). Tags []*Tag `locationName:"tags" type:"list"` } @@ -30903,7 +33258,7 @@ func (s *LightsailDistribution) SetTags(v []*Tag) *LightsailDistribution { return s } -// Describes the Lightsail load balancer. +// Describes a load balancer. 
type LoadBalancer struct { _ struct{} `type:"structure"` @@ -30965,7 +33320,7 @@ type LoadBalancer struct { SupportCode *string `locationName:"supportCode" type:"string"` // The tag keys and optional values for the resource. For more information about - // tags in Lightsail, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-tags). + // tags in Lightsail, see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-tags). Tags []*Tag `locationName:"tags" type:"list"` // An array of LoadBalancerTlsCertificateSummary objects that provide additional @@ -31235,7 +33590,7 @@ type LoadBalancerTlsCertificate struct { SupportCode *string `locationName:"supportCode" type:"string"` // The tag keys and optional values for the resource. For more information about - // tags in Lightsail, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-tags). + // tags in Lightsail, see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-tags). Tags []*Tag `locationName:"tags" type:"list"` } @@ -33315,7 +35670,7 @@ type RelationalDatabase struct { SupportCode *string `locationName:"supportCode" type:"string"` // The tag keys and optional values for the resource. For more information about - // tags in Lightsail, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-tags). + // tags in Lightsail, see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-tags). Tags []*Tag `locationName:"tags" type:"list"` } @@ -33920,7 +36275,7 @@ type RelationalDatabaseSnapshot struct { SupportCode *string `locationName:"supportCode" type:"string"` // The tag keys and optional values for the resource. 
For more information about - // tags in Lightsail, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-tags). + // tags in Lightsail, see the Amazon Lightsail Developer Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-tags). Tags []*Tag `locationName:"tags" type:"list"` } @@ -34259,6 +36614,39 @@ func (s *ResourceLocation) SetRegionName(v string) *ResourceLocation { return s } +// Describes an Amazon Lightsail instance that has access to a Lightsail bucket. +type ResourceReceivingAccess struct { + _ struct{} `type:"structure"` + + // The name of the Lightsail instance. + Name *string `locationName:"name" type:"string"` + + // The Lightsail resource type (for example, Instance). + ResourceType *string `locationName:"resourceType" type:"string"` +} + +// String returns the string representation +func (s ResourceReceivingAccess) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResourceReceivingAccess) GoString() string { + return s.String() +} + +// SetName sets the Name field's value. +func (s *ResourceReceivingAccess) SetName(v string) *ResourceReceivingAccess { + s.Name = &v + return s +} + +// SetResourceType sets the ResourceType field's value. +func (s *ResourceReceivingAccess) SetResourceType(v string) *ResourceReceivingAccess { + s.ResourceType = &v + return s +} + // Describes the domain name system (DNS) records to add to your domain's DNS // to validate it for an Amazon Lightsail certificate. type ResourceRecord struct { @@ -34526,6 +36914,108 @@ func (s *SetIpAddressTypeOutput) SetOperations(v []*Operation) *SetIpAddressType return s } +type SetResourceAccessForBucketInput struct { + _ struct{} `type:"structure"` + + // The access setting. + // + // The following access settings are available: + // + // * allow - Allows access to the bucket and its objects. + // + // * deny - Denies access to the bucket and its objects. 
Use this setting + // to remove access for a resource previously set to allow. + // + // Access is a required field + Access *string `locationName:"access" type:"string" required:"true" enum:"ResourceBucketAccess"` + + // The name of the bucket for which to set access to another Lightsail resource. + // + // BucketName is a required field + BucketName *string `locationName:"bucketName" min:"3" type:"string" required:"true"` + + // The name of the Lightsail instance for which to set bucket access. The instance + // must be in a running or stopped state. + // + // ResourceName is a required field + ResourceName *string `locationName:"resourceName" type:"string" required:"true"` +} + +// String returns the string representation +func (s SetResourceAccessForBucketInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetResourceAccessForBucketInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SetResourceAccessForBucketInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SetResourceAccessForBucketInput"} + if s.Access == nil { + invalidParams.Add(request.NewErrParamRequired("Access")) + } + if s.BucketName == nil { + invalidParams.Add(request.NewErrParamRequired("BucketName")) + } + if s.BucketName != nil && len(*s.BucketName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("BucketName", 3)) + } + if s.ResourceName == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccess sets the Access field's value. +func (s *SetResourceAccessForBucketInput) SetAccess(v string) *SetResourceAccessForBucketInput { + s.Access = &v + return s +} + +// SetBucketName sets the BucketName field's value. 
+func (s *SetResourceAccessForBucketInput) SetBucketName(v string) *SetResourceAccessForBucketInput { + s.BucketName = &v + return s +} + +// SetResourceName sets the ResourceName field's value. +func (s *SetResourceAccessForBucketInput) SetResourceName(v string) *SetResourceAccessForBucketInput { + s.ResourceName = &v + return s +} + +type SetResourceAccessForBucketOutput struct { + _ struct{} `type:"structure"` + + // An array of objects that describe the result of the action, such as the status + // of the request, the timestamp of the request, and the resources affected + // by the request. + Operations []*Operation `locationName:"operations" type:"list"` +} + +// String returns the string representation +func (s SetResourceAccessForBucketOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetResourceAccessForBucketOutput) GoString() string { + return s.String() +} + +// SetOperations sets the Operations field's value. +func (s *SetResourceAccessForBucketOutput) SetOperations(v []*Operation) *SetResourceAccessForBucketOutput { + s.Operations = v + return s +} + type StartInstanceInput struct { _ struct{} `type:"structure"` @@ -34652,7 +37142,7 @@ func (s *StartRelationalDatabaseOutput) SetOperations(v []*Operation) *StartRela return s } -// Describes the static IP. +// Describes a static IP. type StaticIp struct { _ struct{} `type:"structure"` @@ -34902,8 +37392,8 @@ func (s *StopRelationalDatabaseOutput) SetOperations(v []*Operation) *StopRelati // Describes a tag key and optional value assigned to an Amazon Lightsail resource. // -// For more information about tags in Lightsail, see the Lightsail Dev Guide -// (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-tags). +// For more information about tags in Lightsail, see the Amazon Lightsail Developer +// Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-tags). 
type Tag struct { _ struct{} `type:"structure"` @@ -35304,6 +37794,201 @@ func (s *UntagResourceOutput) SetOperations(v []*Operation) *UntagResourceOutput return s } +type UpdateBucketBundleInput struct { + _ struct{} `type:"structure"` + + // The name of the bucket for which to update the bundle. + // + // BucketName is a required field + BucketName *string `locationName:"bucketName" min:"3" type:"string" required:"true"` + + // The ID of the new bundle to apply to the bucket. + // + // Use the GetBucketBundles action to get a list of bundle IDs that you can + // specify. + // + // BundleId is a required field + BundleId *string `locationName:"bundleId" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateBucketBundleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateBucketBundleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateBucketBundleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateBucketBundleInput"} + if s.BucketName == nil { + invalidParams.Add(request.NewErrParamRequired("BucketName")) + } + if s.BucketName != nil && len(*s.BucketName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("BucketName", 3)) + } + if s.BundleId == nil { + invalidParams.Add(request.NewErrParamRequired("BundleId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucketName sets the BucketName field's value. +func (s *UpdateBucketBundleInput) SetBucketName(v string) *UpdateBucketBundleInput { + s.BucketName = &v + return s +} + +// SetBundleId sets the BundleId field's value. 
+func (s *UpdateBucketBundleInput) SetBundleId(v string) *UpdateBucketBundleInput { + s.BundleId = &v + return s +} + +type UpdateBucketBundleOutput struct { + _ struct{} `type:"structure"` + + // An array of objects that describe the result of the action, such as the status + // of the request, the timestamp of the request, and the resources affected + // by the request. + Operations []*Operation `locationName:"operations" type:"list"` +} + +// String returns the string representation +func (s UpdateBucketBundleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateBucketBundleOutput) GoString() string { + return s.String() +} + +// SetOperations sets the Operations field's value. +func (s *UpdateBucketBundleOutput) SetOperations(v []*Operation) *UpdateBucketBundleOutput { + s.Operations = v + return s +} + +type UpdateBucketInput struct { + _ struct{} `type:"structure"` + + // An object that sets the public accessibility of objects in the specified + // bucket. + AccessRules *AccessRules `locationName:"accessRules" type:"structure"` + + // The name of the bucket to update. + // + // BucketName is a required field + BucketName *string `locationName:"bucketName" min:"3" type:"string" required:"true"` + + // An array of strings to specify the AWS account IDs that can access the bucket. + // + // You can give a maximum of 10 AWS accounts access to a bucket. + ReadonlyAccessAccounts []*string `locationName:"readonlyAccessAccounts" type:"list"` + + // Specifies whether to enable or suspend versioning of objects in the bucket. + // + // The following options can be specified: + // + // * Enabled - Enables versioning of objects in the specified bucket. + // + // * Suspended - Suspends versioning of objects in the specified bucket. + // Existing object versions are retained. 
+ Versioning *string `locationName:"versioning" type:"string"` +} + +// String returns the string representation +func (s UpdateBucketInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateBucketInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateBucketInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateBucketInput"} + if s.BucketName == nil { + invalidParams.Add(request.NewErrParamRequired("BucketName")) + } + if s.BucketName != nil && len(*s.BucketName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("BucketName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccessRules sets the AccessRules field's value. +func (s *UpdateBucketInput) SetAccessRules(v *AccessRules) *UpdateBucketInput { + s.AccessRules = v + return s +} + +// SetBucketName sets the BucketName field's value. +func (s *UpdateBucketInput) SetBucketName(v string) *UpdateBucketInput { + s.BucketName = &v + return s +} + +// SetReadonlyAccessAccounts sets the ReadonlyAccessAccounts field's value. +func (s *UpdateBucketInput) SetReadonlyAccessAccounts(v []*string) *UpdateBucketInput { + s.ReadonlyAccessAccounts = v + return s +} + +// SetVersioning sets the Versioning field's value. +func (s *UpdateBucketInput) SetVersioning(v string) *UpdateBucketInput { + s.Versioning = &v + return s +} + +type UpdateBucketOutput struct { + _ struct{} `type:"structure"` + + // An object that describes the bucket that is updated. + Bucket *Bucket `locationName:"bucket" type:"structure"` + + // An array of objects that describe the result of the action, such as the status + // of the request, the timestamp of the request, and the resources affected + // by the request. 
+ Operations []*Operation `locationName:"operations" type:"list"` +} + +// String returns the string representation +func (s UpdateBucketOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateBucketOutput) GoString() string { + return s.String() +} + +// SetBucket sets the Bucket field's value. +func (s *UpdateBucketOutput) SetBucket(v *Bucket) *UpdateBucketOutput { + s.Bucket = v + return s +} + +// SetOperations sets the Operations field's value. +func (s *UpdateBucketOutput) SetOperations(v []*Operation) *UpdateBucketOutput { + s.Operations = v + return s +} + type UpdateContainerServiceInput struct { _ struct{} `type:"structure"` @@ -36078,6 +38763,22 @@ func AccessDirection_Values() []string { } } +const ( + // AccessTypePublic is a AccessType enum value + AccessTypePublic = "public" + + // AccessTypePrivate is a AccessType enum value + AccessTypePrivate = "private" +) + +// AccessType_Values returns all elements of the AccessType enum +func AccessType_Values() []string { + return []string{ + AccessTypePublic, + AccessTypePrivate, + } +} + const ( // AddOnTypeAutoSnapshot is a AddOnType enum value AddOnTypeAutoSnapshot = "AutoSnapshot" @@ -36166,6 +38867,22 @@ func BlueprintType_Values() []string { } } +const ( + // BucketMetricNameBucketSizeBytes is a BucketMetricName enum value + BucketMetricNameBucketSizeBytes = "BucketSizeBytes" + + // BucketMetricNameNumberOfObjects is a BucketMetricName enum value + BucketMetricNameNumberOfObjects = "NumberOfObjects" +) + +// BucketMetricName_Values returns all elements of the BucketMetricName enum +func BucketMetricName_Values() []string { + return []string{ + BucketMetricNameBucketSizeBytes, + BucketMetricNameNumberOfObjects, + } +} + const ( // CertificateStatusPendingValidation is a CertificateStatus enum value CertificateStatusPendingValidation = "PENDING_VALIDATION" @@ -37642,6 +40359,27 @@ const ( // OperationTypeDeleteContainerImage is a 
OperationType enum value OperationTypeDeleteContainerImage = "DeleteContainerImage" + + // OperationTypeCreateBucket is a OperationType enum value + OperationTypeCreateBucket = "CreateBucket" + + // OperationTypeDeleteBucket is a OperationType enum value + OperationTypeDeleteBucket = "DeleteBucket" + + // OperationTypeCreateBucketAccessKey is a OperationType enum value + OperationTypeCreateBucketAccessKey = "CreateBucketAccessKey" + + // OperationTypeDeleteBucketAccessKey is a OperationType enum value + OperationTypeDeleteBucketAccessKey = "DeleteBucketAccessKey" + + // OperationTypeUpdateBucketBundle is a OperationType enum value + OperationTypeUpdateBucketBundle = "UpdateBucketBundle" + + // OperationTypeUpdateBucket is a OperationType enum value + OperationTypeUpdateBucket = "UpdateBucket" + + // OperationTypeSetResourceAccessForBucket is a OperationType enum value + OperationTypeSetResourceAccessForBucket = "SetResourceAccessForBucket" ) // OperationType_Values returns all elements of the OperationType enum @@ -37719,6 +40457,13 @@ func OperationType_Values() []string { OperationTypeCreateContainerServiceRegistryLogin, OperationTypeRegisterContainerImage, OperationTypeDeleteContainerImage, + OperationTypeCreateBucket, + OperationTypeDeleteBucket, + OperationTypeCreateBucketAccessKey, + OperationTypeDeleteBucketAccessKey, + OperationTypeUpdateBucketBundle, + OperationTypeUpdateBucket, + OperationTypeSetResourceAccessForBucket, } } @@ -37856,6 +40601,9 @@ const ( // RegionNameApNortheast2 is a RegionName enum value RegionNameApNortheast2 = "ap-northeast-2" + + // RegionNameEuNorth1 is a RegionName enum value + RegionNameEuNorth1 = "eu-north-1" ) // RegionName_Values returns all elements of the RegionName enum @@ -37875,6 +40623,7 @@ func RegionName_Values() []string { RegionNameApSoutheast2, RegionNameApNortheast1, RegionNameApNortheast2, + RegionNameEuNorth1, } } @@ -37966,6 +40715,22 @@ func RenewalStatus_Values() []string { } } +const ( + // 
ResourceBucketAccessAllow is a ResourceBucketAccess enum value + ResourceBucketAccessAllow = "allow" + + // ResourceBucketAccessDeny is a ResourceBucketAccess enum value + ResourceBucketAccessDeny = "deny" +) + +// ResourceBucketAccess_Values returns all elements of the ResourceBucketAccess enum +func ResourceBucketAccess_Values() []string { + return []string{ + ResourceBucketAccessAllow, + ResourceBucketAccessDeny, + } +} + const ( // ResourceTypeContainerService is a ResourceType enum value ResourceTypeContainerService = "ContainerService" @@ -38023,6 +40788,9 @@ const ( // ResourceTypeCertificate is a ResourceType enum value ResourceTypeCertificate = "Certificate" + + // ResourceTypeBucket is a ResourceType enum value + ResourceTypeBucket = "Bucket" ) // ResourceType_Values returns all elements of the ResourceType enum @@ -38047,6 +40815,23 @@ func ResourceType_Values() []string { ResourceTypeContactMethod, ResourceTypeDistribution, ResourceTypeCertificate, + ResourceTypeBucket, + } +} + +const ( + // StatusTypeActive is a StatusType enum value + StatusTypeActive = "Active" + + // StatusTypeInactive is a StatusType enum value + StatusTypeInactive = "Inactive" +) + +// StatusType_Values returns all elements of the StatusType enum +func StatusType_Values() []string { + return []string{ + StatusTypeActive, + StatusTypeInactive, } } diff --git a/service/lightsail/doc.go b/service/lightsail/doc.go index 8dc6ffde61a..73c35ed0a6e 100644 --- a/service/lightsail/doc.go +++ b/service/lightsail/doc.go @@ -6,14 +6,15 @@ // Amazon Lightsail is the easiest way to get started with Amazon Web Services // (AWS) for developers who need to build websites or web applications. 
It includes // everything you need to launch your project quickly - instances (virtual private -// servers), container services, managed databases, SSD-based block storage, -// static IP addresses, load balancers, content delivery network (CDN) distributions, -// DNS management of registered domains, and resource snapshots (backups) - -// for a low, predictable monthly price. +// servers), container services, storage buckets, managed databases, SSD-based +// block storage, static IP addresses, load balancers, content delivery network +// (CDN) distributions, DNS management of registered domains, and resource snapshots +// (backups) - for a low, predictable monthly price. // // You can manage your Lightsail resources using the Lightsail console, Lightsail // API, AWS Command Line Interface (AWS CLI), or SDKs. For more information -// about Lightsail concepts and tasks, see the Lightsail Dev Guide (http://lightsail.aws.amazon.com/ls/docs/how-to/article/lightsail-how-to-set-up-access-keys-to-use-sdk-api-cli). +// about Lightsail concepts and tasks, see the Amazon Lightsail Developer Guide +// (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/lightsail-how-to-set-up-access-keys-to-use-sdk-api-cli). // // This API Reference provides detailed information about the actions, data // types, parameters, and errors of the Lightsail service. 
For more information diff --git a/service/lightsail/lightsailiface/interface.go b/service/lightsail/lightsailiface/interface.go index e82fb6a86e3..478042d98b7 100644 --- a/service/lightsail/lightsailiface/interface.go +++ b/service/lightsail/lightsailiface/interface.go @@ -92,6 +92,14 @@ type LightsailAPI interface { CopySnapshotWithContext(aws.Context, *lightsail.CopySnapshotInput, ...request.Option) (*lightsail.CopySnapshotOutput, error) CopySnapshotRequest(*lightsail.CopySnapshotInput) (*request.Request, *lightsail.CopySnapshotOutput) + CreateBucket(*lightsail.CreateBucketInput) (*lightsail.CreateBucketOutput, error) + CreateBucketWithContext(aws.Context, *lightsail.CreateBucketInput, ...request.Option) (*lightsail.CreateBucketOutput, error) + CreateBucketRequest(*lightsail.CreateBucketInput) (*request.Request, *lightsail.CreateBucketOutput) + + CreateBucketAccessKey(*lightsail.CreateBucketAccessKeyInput) (*lightsail.CreateBucketAccessKeyOutput, error) + CreateBucketAccessKeyWithContext(aws.Context, *lightsail.CreateBucketAccessKeyInput, ...request.Option) (*lightsail.CreateBucketAccessKeyOutput, error) + CreateBucketAccessKeyRequest(*lightsail.CreateBucketAccessKeyInput) (*request.Request, *lightsail.CreateBucketAccessKeyOutput) + CreateCertificate(*lightsail.CreateCertificateInput) (*lightsail.CreateCertificateOutput, error) CreateCertificateWithContext(aws.Context, *lightsail.CreateCertificateInput, ...request.Option) (*lightsail.CreateCertificateOutput, error) CreateCertificateRequest(*lightsail.CreateCertificateInput) (*request.Request, *lightsail.CreateCertificateOutput) @@ -184,6 +192,14 @@ type LightsailAPI interface { DeleteAutoSnapshotWithContext(aws.Context, *lightsail.DeleteAutoSnapshotInput, ...request.Option) (*lightsail.DeleteAutoSnapshotOutput, error) DeleteAutoSnapshotRequest(*lightsail.DeleteAutoSnapshotInput) (*request.Request, *lightsail.DeleteAutoSnapshotOutput) + DeleteBucket(*lightsail.DeleteBucketInput) (*lightsail.DeleteBucketOutput, 
error) + DeleteBucketWithContext(aws.Context, *lightsail.DeleteBucketInput, ...request.Option) (*lightsail.DeleteBucketOutput, error) + DeleteBucketRequest(*lightsail.DeleteBucketInput) (*request.Request, *lightsail.DeleteBucketOutput) + + DeleteBucketAccessKey(*lightsail.DeleteBucketAccessKeyInput) (*lightsail.DeleteBucketAccessKeyOutput, error) + DeleteBucketAccessKeyWithContext(aws.Context, *lightsail.DeleteBucketAccessKeyInput, ...request.Option) (*lightsail.DeleteBucketAccessKeyOutput, error) + DeleteBucketAccessKeyRequest(*lightsail.DeleteBucketAccessKeyInput) (*request.Request, *lightsail.DeleteBucketAccessKeyOutput) + DeleteCertificate(*lightsail.DeleteCertificateInput) (*lightsail.DeleteCertificateOutput, error) DeleteCertificateWithContext(aws.Context, *lightsail.DeleteCertificateInput, ...request.Option) (*lightsail.DeleteCertificateOutput, error) DeleteCertificateRequest(*lightsail.DeleteCertificateInput) (*request.Request, *lightsail.DeleteCertificateOutput) @@ -300,6 +316,22 @@ type LightsailAPI interface { GetBlueprintsWithContext(aws.Context, *lightsail.GetBlueprintsInput, ...request.Option) (*lightsail.GetBlueprintsOutput, error) GetBlueprintsRequest(*lightsail.GetBlueprintsInput) (*request.Request, *lightsail.GetBlueprintsOutput) + GetBucketAccessKeys(*lightsail.GetBucketAccessKeysInput) (*lightsail.GetBucketAccessKeysOutput, error) + GetBucketAccessKeysWithContext(aws.Context, *lightsail.GetBucketAccessKeysInput, ...request.Option) (*lightsail.GetBucketAccessKeysOutput, error) + GetBucketAccessKeysRequest(*lightsail.GetBucketAccessKeysInput) (*request.Request, *lightsail.GetBucketAccessKeysOutput) + + GetBucketBundles(*lightsail.GetBucketBundlesInput) (*lightsail.GetBucketBundlesOutput, error) + GetBucketBundlesWithContext(aws.Context, *lightsail.GetBucketBundlesInput, ...request.Option) (*lightsail.GetBucketBundlesOutput, error) + GetBucketBundlesRequest(*lightsail.GetBucketBundlesInput) (*request.Request, *lightsail.GetBucketBundlesOutput) + + 
GetBucketMetricData(*lightsail.GetBucketMetricDataInput) (*lightsail.GetBucketMetricDataOutput, error) + GetBucketMetricDataWithContext(aws.Context, *lightsail.GetBucketMetricDataInput, ...request.Option) (*lightsail.GetBucketMetricDataOutput, error) + GetBucketMetricDataRequest(*lightsail.GetBucketMetricDataInput) (*request.Request, *lightsail.GetBucketMetricDataOutput) + + GetBuckets(*lightsail.GetBucketsInput) (*lightsail.GetBucketsOutput, error) + GetBucketsWithContext(aws.Context, *lightsail.GetBucketsInput, ...request.Option) (*lightsail.GetBucketsOutput, error) + GetBucketsRequest(*lightsail.GetBucketsInput) (*request.Request, *lightsail.GetBucketsOutput) + GetBundles(*lightsail.GetBundlesInput) (*lightsail.GetBundlesOutput, error) GetBundlesWithContext(aws.Context, *lightsail.GetBundlesInput, ...request.Option) (*lightsail.GetBundlesOutput, error) GetBundlesRequest(*lightsail.GetBundlesInput) (*request.Request, *lightsail.GetBundlesOutput) @@ -568,6 +600,10 @@ type LightsailAPI interface { SetIpAddressTypeWithContext(aws.Context, *lightsail.SetIpAddressTypeInput, ...request.Option) (*lightsail.SetIpAddressTypeOutput, error) SetIpAddressTypeRequest(*lightsail.SetIpAddressTypeInput) (*request.Request, *lightsail.SetIpAddressTypeOutput) + SetResourceAccessForBucket(*lightsail.SetResourceAccessForBucketInput) (*lightsail.SetResourceAccessForBucketOutput, error) + SetResourceAccessForBucketWithContext(aws.Context, *lightsail.SetResourceAccessForBucketInput, ...request.Option) (*lightsail.SetResourceAccessForBucketOutput, error) + SetResourceAccessForBucketRequest(*lightsail.SetResourceAccessForBucketInput) (*request.Request, *lightsail.SetResourceAccessForBucketOutput) + StartInstance(*lightsail.StartInstanceInput) (*lightsail.StartInstanceOutput, error) StartInstanceWithContext(aws.Context, *lightsail.StartInstanceInput, ...request.Option) (*lightsail.StartInstanceOutput, error) StartInstanceRequest(*lightsail.StartInstanceInput) (*request.Request, 
*lightsail.StartInstanceOutput) @@ -600,6 +636,14 @@ type LightsailAPI interface { UntagResourceWithContext(aws.Context, *lightsail.UntagResourceInput, ...request.Option) (*lightsail.UntagResourceOutput, error) UntagResourceRequest(*lightsail.UntagResourceInput) (*request.Request, *lightsail.UntagResourceOutput) + UpdateBucket(*lightsail.UpdateBucketInput) (*lightsail.UpdateBucketOutput, error) + UpdateBucketWithContext(aws.Context, *lightsail.UpdateBucketInput, ...request.Option) (*lightsail.UpdateBucketOutput, error) + UpdateBucketRequest(*lightsail.UpdateBucketInput) (*request.Request, *lightsail.UpdateBucketOutput) + + UpdateBucketBundle(*lightsail.UpdateBucketBundleInput) (*lightsail.UpdateBucketBundleOutput, error) + UpdateBucketBundleWithContext(aws.Context, *lightsail.UpdateBucketBundleInput, ...request.Option) (*lightsail.UpdateBucketBundleOutput, error) + UpdateBucketBundleRequest(*lightsail.UpdateBucketBundleInput) (*request.Request, *lightsail.UpdateBucketBundleOutput) + UpdateContainerService(*lightsail.UpdateContainerServiceInput) (*lightsail.UpdateContainerServiceOutput, error) UpdateContainerServiceWithContext(aws.Context, *lightsail.UpdateContainerServiceInput, ...request.Option) (*lightsail.UpdateContainerServiceOutput, error) UpdateContainerServiceRequest(*lightsail.UpdateContainerServiceInput) (*request.Request, *lightsail.UpdateContainerServiceOutput) diff --git a/service/wellarchitected/api.go b/service/wellarchitected/api.go index d1ed9fb0c19..18d9f4322d1 100644 --- a/service/wellarchitected/api.go +++ b/service/wellarchitected/api.go @@ -742,7 +742,7 @@ func (c *WellArchitected) GetAnswerRequest(input *GetAnswerInput) (req *request. // GetAnswer API operation for AWS Well-Architected Tool. // -// Get lens review. +// Get the answer to a specific question in a workload review. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2785,6 +2785,10 @@ func (c *WellArchitected) UntagResourceRequest(input *UntagResourceInput) (req * // // Deletes specified tags from a resource. // +// To specify multiple tags, use separate tagKeys parameters, for example: +// +// DELETE /tags/WorkloadArn?tagKeys=key1&tagKeys=key2 +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -3447,6 +3451,9 @@ func (s *AccessDeniedException) RequestID() string { type Answer struct { _ struct{} `type:"structure"` + // A list of selected choices to a question in your workload. + ChoiceAnswers []*ChoiceAnswer `type:"list"` + // List of choices available for a question. Choices []*Choice `type:"list"` @@ -3454,6 +3461,8 @@ type Answer struct { HelpfulResourceUrl *string `min:"1" type:"string"` // The improvement plan URL for a question. + // + // This value is only available if the question has been answered. ImprovementPlanUrl *string `min:"1" type:"string"` // Defines whether this question is applicable to a lens review. @@ -3476,6 +3485,9 @@ type Answer struct { // The title of the question. QuestionTitle *string `min:"1" type:"string"` + // The reason why the question is not applicable to your workload. + Reason *string `type:"string" enum:"AnswerReason"` + // The risk for a given workload, lens review, pillar, or question. Risk *string `type:"string" enum:"Risk"` @@ -3495,6 +3507,12 @@ func (s Answer) GoString() string { return s.String() } +// SetChoiceAnswers sets the ChoiceAnswers field's value. +func (s *Answer) SetChoiceAnswers(v []*ChoiceAnswer) *Answer { + s.ChoiceAnswers = v + return s +} + // SetChoices sets the Choices field's value. 
func (s *Answer) SetChoices(v []*Choice) *Answer { s.Choices = v @@ -3549,6 +3567,12 @@ func (s *Answer) SetQuestionTitle(v string) *Answer { return s } +// SetReason sets the Reason field's value. +func (s *Answer) SetReason(v string) *Answer { + s.Reason = &v + return s +} + // SetRisk sets the Risk field's value. func (s *Answer) SetRisk(v string) *Answer { s.Risk = &v @@ -3565,6 +3589,9 @@ func (s *Answer) SetSelectedChoices(v []*string) *Answer { type AnswerSummary struct { _ struct{} `type:"structure"` + // A list of selected choices to a question in your workload. + ChoiceAnswerSummaries []*ChoiceAnswerSummary `type:"list"` + // List of choices available for a question. Choices []*Choice `type:"list"` @@ -3582,6 +3609,9 @@ type AnswerSummary struct { // The title of the question. QuestionTitle *string `min:"1" type:"string"` + // The reason why a choice is non-applicable to a question in your workload. + Reason *string `type:"string" enum:"AnswerReason"` + // The risk for a given workload, lens review, pillar, or question. Risk *string `type:"string" enum:"Risk"` @@ -3601,6 +3631,12 @@ func (s AnswerSummary) GoString() string { return s.String() } +// SetChoiceAnswerSummaries sets the ChoiceAnswerSummaries field's value. +func (s *AnswerSummary) SetChoiceAnswerSummaries(v []*ChoiceAnswerSummary) *AnswerSummary { + s.ChoiceAnswerSummaries = v + return s +} + // SetChoices sets the Choices field's value. func (s *AnswerSummary) SetChoices(v []*Choice) *AnswerSummary { s.Choices = v @@ -3631,6 +3667,12 @@ func (s *AnswerSummary) SetQuestionTitle(v string) *AnswerSummary { return s } +// SetReason sets the Reason field's value. +func (s *AnswerSummary) SetReason(v string) *AnswerSummary { + s.Reason = &v + return s +} + // SetRisk sets the Risk field's value. 
func (s *AnswerSummary) SetRisk(v string) *AnswerSummary { s.Risk = &v @@ -3760,6 +3802,156 @@ func (s *Choice) SetTitle(v string) *Choice { return s } +// A choice that has been answered on a question in your workload. +type ChoiceAnswer struct { + _ struct{} `type:"structure"` + + // The ID of a choice. + ChoiceId *string `min:"1" type:"string"` + + // The notes associated with a choice. + Notes *string `type:"string"` + + // The reason why a choice is non-applicable to a question in your workload. + Reason *string `type:"string" enum:"ChoiceReason"` + + // The status of a choice. + Status *string `type:"string" enum:"ChoiceStatus"` +} + +// String returns the string representation +func (s ChoiceAnswer) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ChoiceAnswer) GoString() string { + return s.String() +} + +// SetChoiceId sets the ChoiceId field's value. +func (s *ChoiceAnswer) SetChoiceId(v string) *ChoiceAnswer { + s.ChoiceId = &v + return s +} + +// SetNotes sets the Notes field's value. +func (s *ChoiceAnswer) SetNotes(v string) *ChoiceAnswer { + s.Notes = &v + return s +} + +// SetReason sets the Reason field's value. +func (s *ChoiceAnswer) SetReason(v string) *ChoiceAnswer { + s.Reason = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *ChoiceAnswer) SetStatus(v string) *ChoiceAnswer { + s.Status = &v + return s +} + +// A choice summary that has been answered on a question in your workload. +type ChoiceAnswerSummary struct { + _ struct{} `type:"structure"` + + // The ID of a choice. + ChoiceId *string `min:"1" type:"string"` + + // The reason why a choice is non-applicable to a question in your workload. + Reason *string `type:"string" enum:"ChoiceReason"` + + // The status of a choice. 
+ Status *string `type:"string" enum:"ChoiceStatus"` +} + +// String returns the string representation +func (s ChoiceAnswerSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ChoiceAnswerSummary) GoString() string { + return s.String() +} + +// SetChoiceId sets the ChoiceId field's value. +func (s *ChoiceAnswerSummary) SetChoiceId(v string) *ChoiceAnswerSummary { + s.ChoiceId = &v + return s +} + +// SetReason sets the Reason field's value. +func (s *ChoiceAnswerSummary) SetReason(v string) *ChoiceAnswerSummary { + s.Reason = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *ChoiceAnswerSummary) SetStatus(v string) *ChoiceAnswerSummary { + s.Status = &v + return s +} + +// A list of choices to be updated. +type ChoiceUpdate struct { + _ struct{} `type:"structure"` + + // The notes associated with a choice. + Notes *string `type:"string"` + + // The reason why a choice is non-applicable to a question in your workload. + Reason *string `type:"string" enum:"ChoiceReason"` + + // The status of a choice. + // + // Status is a required field + Status *string `type:"string" required:"true" enum:"ChoiceStatus"` +} + +// String returns the string representation +func (s ChoiceUpdate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ChoiceUpdate) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ChoiceUpdate) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ChoiceUpdate"} + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetNotes sets the Notes field's value. +func (s *ChoiceUpdate) SetNotes(v string) *ChoiceUpdate { + s.Notes = &v + return s +} + +// SetReason sets the Reason field's value. 
+func (s *ChoiceUpdate) SetReason(v string) *ChoiceUpdate { + s.Reason = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *ChoiceUpdate) SetStatus(v string) *ChoiceUpdate { + s.Status = &v + return s +} + // The resource already exists. type ConflictException struct { _ struct{} `type:"structure"` @@ -5269,6 +5461,8 @@ type ImprovementSummary struct { _ struct{} `type:"structure"` // The improvement plan URL for a question. + // + // This value is only available if the question has been answered. ImprovementPlanUrl *string `min:"1" type:"string"` // The ID used to identify a pillar, for example, security. @@ -7476,7 +7670,8 @@ func (s *ThrottlingException) RequestID() string { type UntagResourceInput struct { _ struct{} `type:"structure"` - // The keys of the tags to be removed. + // A list of tag keys. Existing tags of the resource whose keys are members + // of this list are removed from the resource. // // TagKeys is a required field TagKeys []*string `location:"querystring" locationName:"tagKeys" min:"1" type:"list" required:"true"` @@ -7549,6 +7744,10 @@ func (s UntagResourceOutput) GoString() string { type UpdateAnswerInput struct { _ struct{} `type:"structure"` + // A list of choices to update on a question in your workload. The String key + // corresponds to the choice ID to be updated. + ChoiceUpdates map[string]*ChoiceUpdate `type:"map"` + // Defines whether this question is applicable to a lens review. IsApplicable *bool `type:"boolean"` @@ -7567,6 +7766,9 @@ type UpdateAnswerInput struct { // QuestionId is a required field QuestionId *string `location:"uri" locationName:"QuestionId" min:"1" type:"string" required:"true"` + // The reason why a question is not applicable to your workload. + Reason *string `type:"string" enum:"AnswerReason"` + // List of selected choice IDs in a question answer. // // The values entered replace the previously selected choices. 
@@ -7609,6 +7811,16 @@ func (s *UpdateAnswerInput) Validate() error { if s.WorkloadId != nil && len(*s.WorkloadId) < 1 { invalidParams.Add(request.NewErrParamMinLen("WorkloadId", 1)) } + if s.ChoiceUpdates != nil { + for i, v := range s.ChoiceUpdates { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ChoiceUpdates", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -7616,6 +7828,12 @@ func (s *UpdateAnswerInput) Validate() error { return nil } +// SetChoiceUpdates sets the ChoiceUpdates field's value. +func (s *UpdateAnswerInput) SetChoiceUpdates(v map[string]*ChoiceUpdate) *UpdateAnswerInput { + s.ChoiceUpdates = v + return s +} + // SetIsApplicable sets the IsApplicable field's value. func (s *UpdateAnswerInput) SetIsApplicable(v bool) *UpdateAnswerInput { s.IsApplicable = &v @@ -7640,6 +7858,12 @@ func (s *UpdateAnswerInput) SetQuestionId(v string) *UpdateAnswerInput { return s } +// SetReason sets the Reason field's value. +func (s *UpdateAnswerInput) SetReason(v string) *UpdateAnswerInput { + s.Reason = &v + return s +} + // SetSelectedChoices sets the SelectedChoices field's value. 
func (s *UpdateAnswerInput) SetSelectedChoices(v []*string) *UpdateAnswerInput { s.SelectedChoices = v @@ -9008,6 +9232,82 @@ func (s *WorkloadSummary) SetWorkloadName(v string) *WorkloadSummary { return s } +const ( + // AnswerReasonOutOfScope is a AnswerReason enum value + AnswerReasonOutOfScope = "OUT_OF_SCOPE" + + // AnswerReasonBusinessPriorities is a AnswerReason enum value + AnswerReasonBusinessPriorities = "BUSINESS_PRIORITIES" + + // AnswerReasonArchitectureConstraints is a AnswerReason enum value + AnswerReasonArchitectureConstraints = "ARCHITECTURE_CONSTRAINTS" + + // AnswerReasonOther is a AnswerReason enum value + AnswerReasonOther = "OTHER" + + // AnswerReasonNone is a AnswerReason enum value + AnswerReasonNone = "NONE" +) + +// AnswerReason_Values returns all elements of the AnswerReason enum +func AnswerReason_Values() []string { + return []string{ + AnswerReasonOutOfScope, + AnswerReasonBusinessPriorities, + AnswerReasonArchitectureConstraints, + AnswerReasonOther, + AnswerReasonNone, + } +} + +const ( + // ChoiceReasonOutOfScope is a ChoiceReason enum value + ChoiceReasonOutOfScope = "OUT_OF_SCOPE" + + // ChoiceReasonBusinessPriorities is a ChoiceReason enum value + ChoiceReasonBusinessPriorities = "BUSINESS_PRIORITIES" + + // ChoiceReasonArchitectureConstraints is a ChoiceReason enum value + ChoiceReasonArchitectureConstraints = "ARCHITECTURE_CONSTRAINTS" + + // ChoiceReasonOther is a ChoiceReason enum value + ChoiceReasonOther = "OTHER" + + // ChoiceReasonNone is a ChoiceReason enum value + ChoiceReasonNone = "NONE" +) + +// ChoiceReason_Values returns all elements of the ChoiceReason enum +func ChoiceReason_Values() []string { + return []string{ + ChoiceReasonOutOfScope, + ChoiceReasonBusinessPriorities, + ChoiceReasonArchitectureConstraints, + ChoiceReasonOther, + ChoiceReasonNone, + } +} + +const ( + // ChoiceStatusSelected is a ChoiceStatus enum value + ChoiceStatusSelected = "SELECTED" + + // ChoiceStatusNotApplicable is a ChoiceStatus enum 
value + ChoiceStatusNotApplicable = "NOT_APPLICABLE" + + // ChoiceStatusUnselected is a ChoiceStatus enum value + ChoiceStatusUnselected = "UNSELECTED" +) + +// ChoiceStatus_Values returns all elements of the ChoiceStatus enum +func ChoiceStatus_Values() []string { + return []string{ + ChoiceStatusSelected, + ChoiceStatusNotApplicable, + ChoiceStatusUnselected, + } +} + const ( // DifferenceStatusUpdated is a DifferenceStatus enum value DifferenceStatusUpdated = "UPDATED"