diff --git a/CHANGELOG.md b/CHANGELOG.md index a7821fd3413..3af72892acc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,14 @@ +Release v1.45.18 (2023-09-27) +=== + +### Service Client Updates +* `service/cognito-idp`: Updates service API, documentation, and examples +* `service/firehose`: Updates service API and documentation + * Features : Adding support for new data ingestion source to Kinesis Firehose - AWS Managed Services Kafka. +* `service/iot`: Updates service API and documentation + * Added support for IoT Rules Engine Kafka Action Headers +* `service/textract`: Updates service API and documentation + Release v1.45.17 (2023-09-26) === diff --git a/aws/endpoints/defaults.go b/aws/endpoints/defaults.go index c0faf837ec6..25c9fdc191b 100644 --- a/aws/endpoints/defaults.go +++ b/aws/endpoints/defaults.go @@ -2572,21 +2572,81 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "appflow-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "appflow-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "appflow-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "appflow-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "sa-east-1", }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "appflow-fips.us-east-1.amazonaws.com", + }, endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "appflow-fips.us-east-2.amazonaws.com", + }, endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "appflow-fips.us-west-1.amazonaws.com", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "appflow-fips.us-west-2.amazonaws.com", + }, }, }, "application-autoscaling": service{ @@ -11044,6 +11104,12 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "email-fips.ca-central-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -11062,6 +11128,15 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "email-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -11071,6 +11146,24 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "email-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: 
"fips-us-west-1", + }: endpoint{ + Hostname: "email-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-west-2", }: endpoint{ @@ -11101,9 +11194,21 @@ var awsPartition = partition{ endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "email-fips.us-east-2.amazonaws.com", + }, endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "email-fips.us-west-1.amazonaws.com", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, @@ -18834,12 +18939,30 @@ var awsPartition = partition{ }, "meetings-chime": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, endpointKey{ Region: "il-central-1", }: endpoint{}, @@ -20519,6 +20642,14 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{ + Hostname: "omics.il-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + }, endpointKey{ Region: "us-east-1", }: endpoint{ diff --git a/aws/version.go b/aws/version.go index e5d20a04072..d9fc9a65ffd 100644 --- a/aws/version.go +++ b/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.45.17" +const SDKVersion = "1.45.18" diff --git a/models/apis/cognito-idp/2016-04-18/api-2.json b/models/apis/cognito-idp/2016-04-18/api-2.json index c96db61a592..aaa30e7d78c 100644 --- a/models/apis/cognito-idp/2016-04-18/api-2.json +++ b/models/apis/cognito-idp/2016-04-18/api-2.json @@ -5571,7 +5571,11 @@ "Id":{"shape":"UserPoolIdType"}, "Name":{"shape":"UserPoolNameType"}, "LambdaConfig":{"shape":"LambdaConfigType"}, - "Status":{"shape":"StatusType"}, + "Status":{ + "shape":"StatusType", + "deprecated":true, + "deprecatedMessage":"This property is no longer available." + }, "LastModifiedDate":{"shape":"DateType"}, "CreationDate":{"shape":"DateType"} } @@ -5630,7 +5634,11 @@ "Policies":{"shape":"UserPoolPolicyType"}, "DeletionProtection":{"shape":"DeletionProtectionType"}, "LambdaConfig":{"shape":"LambdaConfigType"}, - "Status":{"shape":"StatusType"}, + "Status":{ + "shape":"StatusType", + "deprecated":true, + "deprecatedMessage":"This property is no longer available." + }, "LastModifiedDate":{"shape":"DateType"}, "CreationDate":{"shape":"DateType"}, "SchemaAttributes":{"shape":"SchemaAttributesListType"}, diff --git a/models/apis/cognito-idp/2016-04-18/docs-2.json b/models/apis/cognito-idp/2016-04-18/docs-2.json index 1c75e970833..c4f75d75487 100644 --- a/models/apis/cognito-idp/2016-04-18/docs-2.json +++ b/models/apis/cognito-idp/2016-04-18/docs-2.json @@ -3288,7 +3288,7 @@ "refs": { "AdminAddUserToGroupRequest$Username": "

The username for the user.

", "AdminConfirmSignUpRequest$Username": "

The user name for which you want to confirm user registration.

", - "AdminCreateUserRequest$Username": "

The username for the user. Must be unique within the user pool. Must be a UTF-8 string between 1 and 128 characters. After the user is created, the username can't be changed.

", + "AdminCreateUserRequest$Username": "

The value that you want to set as the username sign-in attribute. The following conditions apply to the username parameter.

", "AdminDeleteUserAttributesRequest$Username": "

The user name of the user from which you would like to delete attributes.

", "AdminDeleteUserRequest$Username": "

The user name of the user you want to delete.

", "AdminDisableUserRequest$Username": "

The user name of the user you want to disable.

", diff --git a/models/apis/cognito-idp/2016-04-18/examples-1.json b/models/apis/cognito-idp/2016-04-18/examples-1.json index b97bf83357e..9041ff8746c 100644 --- a/models/apis/cognito-idp/2016-04-18/examples-1.json +++ b/models/apis/cognito-idp/2016-04-18/examples-1.json @@ -695,83 +695,6 @@ } ], "ListUsers": [ - { - "input": { - "AttributesToGet": [ - "email", - "sub" - ], - "Filter": "\"email\"^=\"testuser\"", - "Limit": 3, - "PaginationToken": "abcd1234EXAMPLE", - "UserPoolId": "us-east-1_EXAMPLE" - }, - "output": { - "PaginationToken": "efgh5678EXAMPLE", - "Users": [ - { - "Attributes": [ - { - "Name": "sub", - "Value": "eaad0219-2117-439f-8d46-4db20e59268f" - }, - { - "Name": "email", - "Value": "testuser@example.com" - } - ], - "Enabled": true, - "UserCreateDate": 1682955829.578, - "UserLastModifiedDate": 1689030181.63, - "UserStatus": "CONFIRMED", - "Username": "testuser" - }, - { - "Attributes": [ - { - "Name": "sub", - "Value": "3b994cfd-0b07-4581-be46-3c82f9a70c90" - }, - { - "Name": "email", - "Value": "testuser2@example.com" - } - ], - "Enabled": true, - "UserCreateDate": 1684427979.201, - "UserLastModifiedDate": 1684427979.201, - "UserStatus": "UNCONFIRMED", - "Username": "testuser2" - }, - { - "Attributes": [ - { - "Name": "sub", - "Value": "5929e0d1-4c34-42d1-9b79-a5ecacfe66f7" - }, - { - "Name": "email", - "Value": "testuser3@example.com" - } - ], - "Enabled": true, - "UserCreateDate": 1684427823.641, - "UserLastModifiedDate": 1684427823.641, - "UserStatus": "UNCONFIRMED", - "Username": "testuser3@example.com" - } - ] - }, - "comments": { - "input": { - }, - "output": { - } - }, - "description": "This request submits a value for all possible parameters for ListUsers. By iterating the PaginationToken, you can page through and collect all users in a user pool.", - "id": "a-listusers-request-for-the-next-3-users-whose-email-address-starts-with-testuser-1689977648246", - "title": "A ListUsers request for the next 3 users whose email address starts with \"testuser.\"" - }, { "input": { "AttributesToGet": [ diff --git a/models/apis/firehose/2015-08-04/api-2.json b/models/apis/firehose/2015-08-04/api-2.json index bcec64710ff..e8e1a31e4ae 100644 --- a/models/apis/firehose/2015-08-04/api-2.json +++ b/models/apis/firehose/2015-08-04/api-2.json @@ -414,6 +414,17 @@ "min":0, "pattern":".*" }, + "AuthenticationConfiguration":{ + "type":"structure", + "required":[ + "RoleARN", + "Connectivity" + ], + "members":{ + "RoleARN":{"shape":"RoleARN"}, + "Connectivity":{"shape":"Connectivity"} + } + }, "BlockSizeBytes":{ "type":"integer", "min":67108864 @@ -444,7 +455,7 @@ "type":"string", "max":512, "min":1, - "pattern":"jdbc:(redshift|postgresql)://((?!-)[A-Za-z0-9-]{1,63}(?Describes the specified delivery stream and its status. For example, after your delivery stream is created, call DescribeDeliveryStream to see whether the delivery stream is ACTIVE and therefore ready for data to be sent to it.

If the status of a delivery stream is CREATING_FAILED, this status doesn't change, and you can't invoke CreateDeliveryStream again on it. However, you can invoke the DeleteDeliveryStream operation to delete it. If the status is DELETING_FAILED, you can force deletion by invoking DeleteDeliveryStream again but with DeleteDeliveryStreamInput$AllowForceDelete set to true.

", "ListDeliveryStreams": "

Lists your delivery streams in alphabetical order of their names.

The number of delivery streams might be too large to return using a single call to ListDeliveryStreams. You can limit the number of delivery streams returned, using the Limit parameter. To determine whether there are more delivery streams to list, check the value of HasMoreDeliveryStreams in the output. If there are more delivery streams to list, you can request them by calling this operation again and setting the ExclusiveStartDeliveryStreamName parameter to the name of the last delivery stream returned in the last call.

", "ListTagsForDeliveryStream": "

Lists the tags for the specified delivery stream. This operation has a limit of five transactions per second per account.

", - "PutRecord": "

Writes a single data record into an Amazon Kinesis Data Firehose delivery stream. To write multiple data records into a delivery stream, use PutRecordBatch. Applications using these operations are referred to as producers.

By default, each delivery stream can take in up to 2,000 transactions per second, 5,000 records per second, or 5 MB per second. If you use PutRecord and PutRecordBatch, the limits are an aggregate across these two operations for each delivery stream. For more information about limits and how to request an increase, see Amazon Kinesis Data Firehose Limits.

You must specify the name of the delivery stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000 KiB in size, and any kind of data. For example, it can be a segment from a log file, geographic location data, website clickstream data, and so on.

Kinesis Data Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\\n) or some other character unique within the data. This allows the consumer application to parse individual data items when reading the data from the destination.

The PutRecord operation returns a RecordId, which is a unique string assigned to each record. Producer applications can use this ID for purposes such as auditability and investigation.

If the PutRecord operation throws a ServiceUnavailableException, back off and retry. If the exception persists, it is possible that the throughput limits have been exceeded for the delivery stream.

Data records sent to Kinesis Data Firehose are stored for 24 hours from the time they are added to a delivery stream as it tries to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available.

Don't concatenate two or more base64 strings to form the data fields of your records. Instead, concatenate the raw data, then perform base64 encoding.

", - "PutRecordBatch": "

Writes multiple data records into a delivery stream in a single call, which can achieve higher throughput per producer than when writing single records. To write single data records into a delivery stream, use PutRecord. Applications using these operations are referred to as producers.

For information about service quota, see Amazon Kinesis Data Firehose Quota.

Each PutRecordBatch request supports up to 500 records. Each record in the request can be as large as 1,000 KB (before base64 encoding), up to a limit of 4 MB for the entire request. These limits cannot be changed.

You must specify the name of the delivery stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000 KB in size, and any kind of data. For example, it could be a segment from a log file, geographic location data, website clickstream data, and so on.

Kinesis Data Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\\n) or some other character unique within the data. This allows the consumer application to parse individual data items when reading the data from the destination.

The PutRecordBatch response includes a count of failed records, FailedPutCount, and an array of responses, RequestResponses. Even if the PutRecordBatch call succeeds, the value of FailedPutCount may be greater than 0, indicating that there are records for which the operation didn't succeed. Each entry in the RequestResponses array provides additional information about the processed record. It directly correlates with a record in the request array using the same ordering, from the top to the bottom. The response array always includes the same number of records as the request array. RequestResponses includes both successfully and unsuccessfully processed records. Kinesis Data Firehose tries to process all records in each PutRecordBatch request. A single record failure does not stop the processing of subsequent records.

A successfully processed record includes a RecordId value, which is unique for the record. An unsuccessfully processed record includes ErrorCode and ErrorMessage values. ErrorCode reflects the type of error, and is one of the following values: ServiceUnavailableException or InternalFailure. ErrorMessage provides more detailed information about the error.

If there is an internal server error or a timeout, the write might have completed or it might have failed. If FailedPutCount is greater than 0, retry the request, resending only those records that might have failed processing. This minimizes the possible duplicate records and also reduces the total bytes sent (and corresponding charges). We recommend that you handle any duplicates at the destination.

If PutRecordBatch throws ServiceUnavailableException, back off and retry. If the exception persists, it is possible that the throughput limits have been exceeded for the delivery stream.

Data records sent to Kinesis Data Firehose are stored for 24 hours from the time they are added to a delivery stream as it attempts to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available.

Don't concatenate two or more base64 strings to form the data fields of your records. Instead, concatenate the raw data, then perform base64 encoding.

", - "StartDeliveryStreamEncryption": "

Enables server-side encryption (SSE) for the delivery stream.

This operation is asynchronous. It returns immediately. When you invoke it, Kinesis Data Firehose first sets the encryption status of the stream to ENABLING, and then to ENABLED. The encryption status of a delivery stream is the Status property in DeliveryStreamEncryptionConfiguration. If the operation fails, the encryption status changes to ENABLING_FAILED. You can continue to read and write data to your delivery stream while the encryption status is ENABLING, but the data is not encrypted. It can take up to 5 seconds after the encryption status changes to ENABLED before all records written to the delivery stream are encrypted. To find out whether a record or a batch of records was encrypted, check the response elements PutRecordOutput$Encrypted and PutRecordBatchOutput$Encrypted, respectively.

To check the encryption status of a delivery stream, use DescribeDeliveryStream.

Even if encryption is currently enabled for a delivery stream, you can still invoke this operation on it to change the ARN of the CMK or both its type and ARN. If you invoke this method to change the CMK, and the old CMK is of type CUSTOMER_MANAGED_CMK, Kinesis Data Firehose schedules the grant it had on the old CMK for retirement. If the new CMK is of type CUSTOMER_MANAGED_CMK, Kinesis Data Firehose creates a grant that enables it to use the new CMK to encrypt and decrypt data and to manage the grant.

If a delivery stream already has encryption enabled and then you invoke this operation to change the ARN of the CMK or both its type and ARN and you get ENABLING_FAILED, this only means that the attempt to change the CMK failed. In this case, encryption remains enabled with the old CMK.

If the encryption status of your delivery stream is ENABLING_FAILED, you can invoke this operation again with a valid CMK. The CMK must be enabled and the key policy mustn't explicitly deny the permission for Kinesis Data Firehose to invoke KMS encrypt and decrypt operations.

You can enable SSE for a delivery stream only if it's a delivery stream that uses DirectPut as its source.

The StartDeliveryStreamEncryption and StopDeliveryStreamEncryption operations have a combined limit of 25 calls per delivery stream per 24 hours. For example, you reach the limit if you call StartDeliveryStreamEncryption 13 times and StopDeliveryStreamEncryption 12 times for the same delivery stream in a 24-hour period.

", + "PutRecord": "

Writes a single data record into an Amazon Kinesis Data Firehose delivery stream. To write multiple data records into a delivery stream, use PutRecordBatch. Applications using these operations are referred to as producers.

By default, each delivery stream can take in up to 2,000 transactions per second, 5,000 records per second, or 5 MB per second. If you use PutRecord and PutRecordBatch, the limits are an aggregate across these two operations for each delivery stream. For more information about limits and how to request an increase, see Amazon Kinesis Data Firehose Limits.

Kinesis Data Firehose accumulates and publishes a particular metric for a customer account in one-minute intervals. Because bursts of incoming bytes or records ingested into a delivery stream might last only a few seconds, the actual traffic spikes might not be fully visible in the customer's one-minute CloudWatch metrics.

You must specify the name of the delivery stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000 KiB in size, and any kind of data. For example, it can be a segment from a log file, geographic location data, website clickstream data, and so on.

Kinesis Data Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\\n) or some other character unique within the data. This allows the consumer application to parse individual data items when reading the data from the destination.

The PutRecord operation returns a RecordId, which is a unique string assigned to each record. Producer applications can use this ID for purposes such as auditability and investigation.

If the PutRecord operation throws a ServiceUnavailableException, the request is automatically retried three times. If the exception persists, it is possible that the throughput limits have been exceeded for the delivery stream.

Re-invoking the Put API operations (for example, PutRecord and PutRecordBatch) can result in duplicate records. For larger data assets, allow for a longer timeout before retrying the Put API operations.

Data records sent to Kinesis Data Firehose are stored for 24 hours from the time they are added to a delivery stream as it tries to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available.

Don't concatenate two or more base64 strings to form the data fields of your records. Instead, concatenate the raw data, then perform base64 encoding.

", + "PutRecordBatch": "

Writes multiple data records into a delivery stream in a single call, which can achieve higher throughput per producer than when writing single records. To write single data records into a delivery stream, use PutRecord. Applications using these operations are referred to as producers.

Kinesis Data Firehose accumulates and publishes a particular metric for a customer account in one-minute intervals. Because bursts of incoming bytes or records ingested into a delivery stream might last only a few seconds, the actual traffic spikes might not be fully visible in the customer's one-minute CloudWatch metrics.

For information about service quota, see Amazon Kinesis Data Firehose Quota.

Each PutRecordBatch request supports up to 500 records. Each record in the request can be as large as 1,000 KB (before base64 encoding), up to a limit of 4 MB for the entire request. These limits cannot be changed.

You must specify the name of the delivery stream and the data record when using PutRecordBatch. The data record consists of a data blob that can be up to 1,000 KB in size, and any kind of data. For example, it could be a segment from a log file, geographic location data, website clickstream data, and so on.

Kinesis Data Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\\n) or some other character unique within the data. This allows the consumer application to parse individual data items when reading the data from the destination.

The PutRecordBatch response includes a count of failed records, FailedPutCount, and an array of responses, RequestResponses. Even if the PutRecordBatch call succeeds, the value of FailedPutCount may be greater than 0, indicating that there are records for which the operation didn't succeed. Each entry in the RequestResponses array provides additional information about the processed record. It directly correlates with a record in the request array using the same ordering, from the top to the bottom. The response array always includes the same number of records as the request array. RequestResponses includes both successfully and unsuccessfully processed records. Kinesis Data Firehose tries to process all records in each PutRecordBatch request. A single record failure does not stop the processing of subsequent records.

A successfully processed record includes a RecordId value, which is unique for the record. An unsuccessfully processed record includes ErrorCode and ErrorMessage values. ErrorCode reflects the type of error, and is one of the following values: ServiceUnavailableException or InternalFailure. ErrorMessage provides more detailed information about the error.

If there is an internal server error or a timeout, the write might have completed or it might have failed. If FailedPutCount is greater than 0, retry the request, resending only those records that might have failed processing. This minimizes the possible duplicate records and also reduces the total bytes sent (and corresponding charges). We recommend that you handle any duplicates at the destination.

If PutRecordBatch throws ServiceUnavailableException, the request is automatically retried three times. If the exception persists, it is possible that the throughput limits have been exceeded for the delivery stream.

Re-invoking the Put API operations (for example, PutRecord and PutRecordBatch) can result in duplicate records. For larger data assets, allow for a longer timeout before retrying the Put API operations.

Data records sent to Kinesis Data Firehose are stored for 24 hours from the time they are added to a delivery stream as it attempts to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available.

Don't concatenate two or more base64 strings to form the data fields of your records. Instead, concatenate the raw data, then perform base64 encoding.

", + "StartDeliveryStreamEncryption": "

Enables server-side encryption (SSE) for the delivery stream.

This operation is asynchronous. It returns immediately. When you invoke it, Kinesis Data Firehose first sets the encryption status of the stream to ENABLING, and then to ENABLED. The encryption status of a delivery stream is the Status property in DeliveryStreamEncryptionConfiguration. If the operation fails, the encryption status changes to ENABLING_FAILED. You can continue to read and write data to your delivery stream while the encryption status is ENABLING, but the data is not encrypted. It can take up to 5 seconds after the encryption status changes to ENABLED before all records written to the delivery stream are encrypted. To find out whether a record or a batch of records was encrypted, check the response elements PutRecordOutput$Encrypted and PutRecordBatchOutput$Encrypted, respectively.

To check the encryption status of a delivery stream, use DescribeDeliveryStream.

Even if encryption is currently enabled for a delivery stream, you can still invoke this operation on it to change the ARN of the CMK or both its type and ARN. If you invoke this method to change the CMK, and the old CMK is of type CUSTOMER_MANAGED_CMK, Kinesis Data Firehose schedules the grant it had on the old CMK for retirement. If the new CMK is of type CUSTOMER_MANAGED_CMK, Kinesis Data Firehose creates a grant that enables it to use the new CMK to encrypt and decrypt data and to manage the grant.

For the KMS grant creation to be successful, Kinesis Data Firehose APIs StartDeliveryStreamEncryption and CreateDeliveryStream should not be called with session credentials that are more than 6 hours old.

If a delivery stream already has encryption enabled and then you invoke this operation to change the ARN of the CMK or both its type and ARN and you get ENABLING_FAILED, this only means that the attempt to change the CMK failed. In this case, encryption remains enabled with the old CMK.

If the encryption status of your delivery stream is ENABLING_FAILED, you can invoke this operation again with a valid CMK. The CMK must be enabled and the key policy mustn't explicitly deny the permission for Kinesis Data Firehose to invoke KMS encrypt and decrypt operations.

You can enable SSE for a delivery stream only if it's a delivery stream that uses DirectPut as its source.

The StartDeliveryStreamEncryption and StopDeliveryStreamEncryption operations have a combined limit of 25 calls per delivery stream per 24 hours. For example, you reach the limit if you call StartDeliveryStreamEncryption 13 times and StopDeliveryStreamEncryption 12 times for the same delivery stream in a 24-hour period.

", "StopDeliveryStreamEncryption": "

Disables server-side encryption (SSE) for the delivery stream.

This operation is asynchronous. It returns immediately. When you invoke it, Kinesis Data Firehose first sets the encryption status of the stream to DISABLING, and then to DISABLED. You can continue to read and write data to your stream while its status is DISABLING. It can take up to 5 seconds after the encryption status changes to DISABLED before all records written to the delivery stream are no longer subject to encryption. To find out whether a record or a batch of records was encrypted, check the response elements PutRecordOutput$Encrypted and PutRecordBatchOutput$Encrypted, respectively.

To check the encryption state of a delivery stream, use DescribeDeliveryStream.

If SSE is enabled using a customer managed CMK and then you invoke StopDeliveryStreamEncryption, Kinesis Data Firehose schedules the related KMS grant for retirement and then retires it after it ensures that it is finished delivering records to the destination.

The StartDeliveryStreamEncryption and StopDeliveryStreamEncryption operations have a combined limit of 25 calls per delivery stream per 24 hours. For example, you reach the limit if you call StartDeliveryStreamEncryption 13 times and StopDeliveryStreamEncryption 12 times for the same delivery stream in a 24-hour period.

", "TagDeliveryStream": "

Adds or updates tags for the specified delivery stream. A tag is a key-value pair that you can define and assign to Amazon Web Services resources. If you specify a tag that already exists, the tag value is replaced with the value that you specify in the request. Tags are metadata. For example, you can add friendly names and descriptions or other types of information that can help you distinguish the delivery stream. For more information about tags, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User Guide.

Each delivery stream can have up to 50 tags.

This operation has a limit of five transactions per second per account.

", "UntagDeliveryStream": "

Removes tags from the specified delivery stream. Removed tags are deleted, and you can't recover them after this operation successfully completes.

If you specify a tag that doesn't exist, the operation ignores it.

This operation has a limit of five transactions per second per account.

", @@ -198,6 +198,13 @@ "AmazonopensearchserviceDestinationUpdate$TypeName": "

The Amazon OpenSearch Service type name. For Elasticsearch 6.x, there can be only one type per index. If you try to specify a new type for an existing index that already has another type, Kinesis Data Firehose returns an error during runtime.

If you upgrade Elasticsearch from 6.x to 7.x and don’t update your delivery stream, Kinesis Data Firehose still delivers data to Elasticsearch with the old index name and type name. If you want to update your delivery stream with a new index name, provide an empty string for TypeName.

" } }, + "AuthenticationConfiguration": { + "base": "

The authentication configuration of the Amazon MSK cluster.

", + "refs": { + "MSKSourceConfiguration$AuthenticationConfiguration": "

The authentication configuration of the Amazon MSK cluster.

", + "MSKSourceDescription$AuthenticationConfiguration": "

The authentication configuration of the Amazon MSK cluster.

" + } + }, "BlockSizeBytes": { "base": null, "refs": { @@ -305,6 +312,12 @@ "refs": { } }, + "Connectivity": { + "base": null, + "refs": { + "AuthenticationConfiguration$Connectivity": "

The type of connectivity used to access the Amazon MSK cluster.

" + } + }, "ContentEncoding": { "base": null, "refs": { @@ -380,7 +393,8 @@ "DeliveryStartTimestamp": { "base": null, "refs": { - "KinesisStreamSourceDescription$DeliveryStartTimestamp": "

Kinesis Data Firehose starts retrieving records from the Kinesis data stream starting with this timestamp.

" + "KinesisStreamSourceDescription$DeliveryStartTimestamp": "

Kinesis Data Firehose starts retrieving records from the Kinesis data stream starting with this timestamp.

", + "MSKSourceDescription$DeliveryStartTimestamp": "

Kinesis Data Firehose starts retrieving records from the topic within the Amazon MSK cluster starting with this timestamp.

" } }, "DeliveryStreamARN": { @@ -978,6 +992,25 @@ "CloudWatchLoggingOptions$LogStreamName": "

The CloudWatch log stream name for logging. This value is required if CloudWatch logging is enabled.

" } }, + "MSKClusterARN": { + "base": null, + "refs": { + "MSKSourceConfiguration$MSKClusterARN": "

The ARN of the Amazon MSK cluster.

", + "MSKSourceDescription$MSKClusterARN": "

The ARN of the Amazon MSK cluster.

" + } + }, + "MSKSourceConfiguration": { + "base": "

The configuration for the Amazon MSK cluster to be used as the source for a delivery stream.

", + "refs": { + "CreateDeliveryStreamInput$MSKSourceConfiguration": null + } + }, + "MSKSourceDescription": { + "base": "

Details about the Amazon MSK cluster used as the source for a Kinesis Data Firehose delivery stream.

", + "refs": { + "SourceDescription$MSKSourceDescription": "

The configuration description for the Amazon MSK cluster to be used as the source for a delivery stream.

" + } + }, "NoEncryptionConfig": { "base": null, "refs": { @@ -1298,6 +1331,7 @@ "AmazonopensearchserviceDestinationConfiguration$RoleARN": "

The Amazon Resource Name (ARN) of the IAM role to be assumed by Kinesis Data Firehose for calling the Amazon OpenSearch Service Configuration API and for indexing documents.

", "AmazonopensearchserviceDestinationDescription$RoleARN": "

The Amazon Resource Name (ARN) of the Amazon Web Services credentials.

", "AmazonopensearchserviceDestinationUpdate$RoleARN": "

The Amazon Resource Name (ARN) of the IAM role to be assumed by Kinesis Data Firehose for calling the Amazon OpenSearch Service Configuration API and for indexing documents.

", + "AuthenticationConfiguration$RoleARN": "

The ARN of the role used to access the Amazon MSK cluster.

", "ElasticsearchDestinationConfiguration$RoleARN": "

The Amazon Resource Name (ARN) of the IAM role to be assumed by Kinesis Data Firehose for calling the Amazon ES Configuration API and for indexing documents. For more information, see Grant Kinesis Data Firehose Access to an Amazon S3 Destination and Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces.

", "ElasticsearchDestinationDescription$RoleARN": "

The Amazon Resource Name (ARN) of the Amazon Web Services credentials. For more information, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces.

", "ElasticsearchDestinationUpdate$RoleARN": "

The Amazon Resource Name (ARN) of the IAM role to be assumed by Kinesis Data Firehose for calling the Amazon ES Configuration API and for indexing documents. For more information, see Grant Kinesis Data Firehose Access to an Amazon S3 Destination and Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces.

", @@ -1523,6 +1557,13 @@ "DeliveryStreamDescription$LastUpdateTimestamp": "

The date and time that the delivery stream was last updated.

" } }, + "TopicName": { + "base": null, + "refs": { + "MSKSourceConfiguration$TopicName": "

The topic name within the Amazon MSK cluster.

", + "MSKSourceDescription$TopicName": "

The topic name within the Amazon MSK cluster.

" + } + }, "UntagDeliveryStreamInput": { "base": null, "refs": { diff --git a/models/apis/iot/2015-05-28/api-2.json b/models/apis/iot/2015-05-28/api-2.json index 4d3259dae3b..407a4606d8f 100644 --- a/models/apis/iot/2015-05-28/api-2.json +++ b/models/apis/iot/2015-05-28/api-2.json @@ -9190,9 +9190,37 @@ "topic":{"shape":"String"}, "key":{"shape":"String"}, "partition":{"shape":"String"}, - "clientProperties":{"shape":"ClientProperties"} + "clientProperties":{"shape":"ClientProperties"}, + "headers":{"shape":"KafkaHeaders"} } }, + "KafkaActionHeader":{ + "type":"structure", + "required":[ + "key", + "value" + ], + "members":{ + "key":{"shape":"KafkaHeaderKey"}, + "value":{"shape":"KafkaHeaderValue"} + } + }, + "KafkaHeaderKey":{ + "type":"string", + "max":16384, + "min":0 + }, + "KafkaHeaderValue":{ + "type":"string", + "max":16384, + "min":0 + }, + "KafkaHeaders":{ + "type":"list", + "member":{"shape":"KafkaActionHeader"}, + "max":100, + "min":1 + }, "Key":{"type":"string"}, "KeyName":{ "type":"string", @@ -11147,7 +11175,9 @@ "THING_GROUP", "CLIENT_ID", "SOURCE_IP", - "PRINCIPAL_ID" + "PRINCIPAL_ID", + "EVENT_TYPE", + "DEVICE_DEFENDER" ] }, "LoggingOptionsPayload":{ diff --git a/models/apis/iot/2015-05-28/docs-2.json b/models/apis/iot/2015-05-28/docs-2.json index 5ab100c8bdc..c984b325ddc 100644 --- a/models/apis/iot/2015-05-28/docs-2.json +++ b/models/apis/iot/2015-05-28/docs-2.json @@ -21,7 +21,7 @@ "CreateAuditSuppression": "

Creates a Device Defender audit suppression.

Requires permission to access the CreateAuditSuppression action.

", "CreateAuthorizer": "

Creates an authorizer.

Requires permission to access the CreateAuthorizer action.

", "CreateBillingGroup": "

Creates a billing group.

Requires permission to access the CreateBillingGroup action.

", - "CreateCertificateFromCsr": "

Creates an X.509 certificate using the specified certificate signing request.

Requires permission to access the CreateCertificateFromCsr action.

The CSR must include a public key that is either an RSA key with a length of at least 2048 bits or an ECC key from NIST P-256 or NIST P-384 curves. For supported certificates, consult Certificate signing algorithms supported by IoT.

Reusing the same certificate signing request (CSR) results in a distinct certificate.

You can create multiple certificates in a batch by creating a directory, copying multiple .csr files into that directory, and then specifying that directory on the command line. The following commands show how to create a batch of certificates given a batch of CSRs. In the following commands, we assume that a set of CSRs are located inside of the directory my-csr-directory:

On Linux and OS X, the command is:

$ ls my-csr-directory/ | xargs -I {} aws iot create-certificate-from-csr --certificate-signing-request file://my-csr-directory/{}

This command lists all of the CSRs in my-csr-directory and pipes each CSR file name to the aws iot create-certificate-from-csr Amazon Web Services CLI command to create a certificate for the corresponding CSR.

You can also run the aws iot create-certificate-from-csr part of the command in parallel to speed up the certificate creation process:

$ ls my-csr-directory/ | xargs -P 10 -I {} aws iot create-certificate-from-csr --certificate-signing-request file://my-csr-directory/{}

On Windows PowerShell, the command to create certificates for all CSRs in my-csr-directory is:

> ls -Name my-csr-directory | %{aws iot create-certificate-from-csr --certificate-signing-request file://my-csr-directory/$_}

On a Windows command prompt, the command to create certificates for all CSRs in my-csr-directory is:

> forfiles /p my-csr-directory /c \"cmd /c aws iot create-certificate-from-csr --certificate-signing-request file://@path\"

", + "CreateCertificateFromCsr": "

Creates an X.509 certificate using the specified certificate signing request.

Requires permission to access the CreateCertificateFromCsr action.

The CSR must include a public key that is either an RSA key with a length of at least 2048 bits or an ECC key from NIST P-256, NIST P-384, or NIST P-521 curves. For supported certificates, consult Certificate signing algorithms supported by IoT.

Reusing the same certificate signing request (CSR) results in a distinct certificate.

You can create multiple certificates in a batch by creating a directory, copying multiple .csr files into that directory, and then specifying that directory on the command line. The following commands show how to create a batch of certificates given a batch of CSRs. In the following commands, we assume that a set of CSRs are located inside of the directory my-csr-directory:

On Linux and OS X, the command is:

$ ls my-csr-directory/ | xargs -I {} aws iot create-certificate-from-csr --certificate-signing-request file://my-csr-directory/{}

This command lists all of the CSRs in my-csr-directory and pipes each CSR file name to the aws iot create-certificate-from-csr Amazon Web Services CLI command to create a certificate for the corresponding CSR.

You can also run the aws iot create-certificate-from-csr part of the command in parallel to speed up the certificate creation process:

$ ls my-csr-directory/ | xargs -P 10 -I {} aws iot create-certificate-from-csr --certificate-signing-request file://my-csr-directory/{}

On Windows PowerShell, the command to create certificates for all CSRs in my-csr-directory is:

> ls -Name my-csr-directory | %{aws iot create-certificate-from-csr --certificate-signing-request file://my-csr-directory/$_}

On a Windows command prompt, the command to create certificates for all CSRs in my-csr-directory is:

> forfiles /p my-csr-directory /c \"cmd /c aws iot create-certificate-from-csr --certificate-signing-request file://@path\"

", "CreateCustomMetric": "

Use this API to define a Custom Metric published by your devices to Device Defender.

Requires permission to access the CreateCustomMetric action.

", "CreateDimension": "

Create a dimension that you can use to limit the scope of a metric used in a security profile for IoT Device Defender. For example, using a TOPIC_FILTER dimension, you can narrow down the scope of the metric only to MQTT topics whose name match the pattern specified in the dimension.

Requires permission to access the CreateDimension action.

", "CreateDomainConfiguration": "

Creates a domain configuration.

Requires permission to access the CreateDomainConfiguration action.

", @@ -65,7 +65,7 @@ "DeleteMitigationAction": "

Deletes a defined mitigation action from your Amazon Web Services accounts.

Requires permission to access the DeleteMitigationAction action.

", "DeleteOTAUpdate": "

Delete an OTA update.

Requires permission to access the DeleteOTAUpdate action.

", "DeletePackage": "

Deletes a specific version from a software package.

Note: All package versions must be deleted before deleting the software package.

Requires permission to access the DeletePackageVersion action.

", - "DeletePackageVersion": "

Deletes a specific version from a software package.

Note: If a package version is designated as default, you must remove the designation from the package using the UpdatePackage action.

", + "DeletePackageVersion": "

Deletes a specific version from a software package.

Note: If a package version is designated as default, you must remove the designation from the software package using the UpdatePackage action.

", "DeletePolicy": "

Deletes the specified policy.

A policy cannot be deleted if it has non-default versions or it is attached to any certificate.

To delete a policy, use the DeletePolicyVersion action to delete all non-default versions of the policy; use the DetachPolicy action to detach the policy from any certificate; and then use the DeletePolicy action to delete the policy.

When a policy is deleted using DeletePolicy, its default version is deleted with it.

Because of the distributed nature of Amazon Web Services, it can take up to five minutes after a policy is detached before it's ready to be deleted.

Requires permission to access the DeletePolicy action.

", "DeletePolicyVersion": "

Deletes the specified version of the specified policy. You cannot delete the default version of a policy using this action. To delete the default version of a policy, use DeletePolicy. To find out which version of a policy is marked as the default version, use ListPolicyVersions.

Requires permission to access the DeletePolicyVersion action.

", "DeleteProvisioningTemplate": "

Deletes a provisioning template.

Requires permission to access the DeleteProvisioningTemplate action.

", @@ -239,8 +239,8 @@ "UpdateIndexingConfiguration": "

Updates the search configuration.

Requires permission to access the UpdateIndexingConfiguration action.

", "UpdateJob": "

Updates supported fields of the specified job.

Requires permission to access the UpdateJob action.

", "UpdateMitigationAction": "

Updates the definition for the specified mitigation action.

Requires permission to access the UpdateMitigationAction action.

", - "UpdatePackage": "

Updates the supported fields for a specific package.

Requires permission to access the UpdatePackage and GetIndexingConfiguration actions.

", - "UpdatePackageConfiguration": "

Updates the package configuration.

Requires permission to access the UpdatePackageConfiguration and iam:PassRole actions.

", + "UpdatePackage": "

Updates the supported fields for a specific software package.

Requires permission to access the UpdatePackage and GetIndexingConfiguration actions.

", + "UpdatePackageConfiguration": "

Updates the software package configuration.

Requires permission to access the UpdatePackageConfiguration and iam:PassRole actions.

", "UpdatePackageVersion": "

Updates the supported fields for a specific package version.

Requires permission to access the UpdatePackageVersion and GetIndexingConfiguration actions.

", "UpdateProvisioningTemplate": "

Updates a provisioning template.

Requires permission to access the UpdateProvisioningTemplate action.

", "UpdateRoleAlias": "

Updates a role alias.

Requires permission to access the UpdateRoleAlias action.

", @@ -381,7 +381,7 @@ "AdditionalParameterMap": { "base": null, "refs": { - "CreateOTAUpdateRequest$additionalParameters": "

A list of additional OTA update parameters which are name-value pairs.

", + "CreateOTAUpdateRequest$additionalParameters": "

A list of additional OTA update parameters, which are name-value pairs. They won't be sent to devices as a part of the Job document.

", "OTAUpdateInfo$additionalParameters": "

A collection of name/value pairs

" } }, @@ -669,7 +669,7 @@ "AttributesMap": { "base": null, "refs": { - "OTAUpdateFile$attributes": "

A list of name/attribute pairs.

" + "OTAUpdateFile$attributes": "

A list of name-attribute pairs. They won't be sent to devices as a part of the Job document.

" } }, "AuditCheckConfiguration": { @@ -1204,7 +1204,7 @@ "BehaviorCriteria": { "base": "

The criteria by which the behavior is determined to be normal.

", "refs": { - "Behavior$criteria": "

The criteria that determine if a device is behaving normally in regard to the metric.

" + "Behavior$criteria": "

The criteria that determine if a device is behaving normally in regard to the metric.

In the IoT console, you can choose to receive an alert through Amazon SNS when IoT Device Defender detects that a device is behaving anomalously.

" } }, "BehaviorCriteriaType": { @@ -3389,7 +3389,7 @@ "EndpointType": { "base": null, "refs": { - "DescribeEndpointRequest$endpointType": "

The endpoint type. Valid endpoint types include:

We strongly recommend that customers use the newer iot:Data-ATS endpoint type to avoid issues related to the widespread distrust of Symantec certificate authorities.

" + "DescribeEndpointRequest$endpointType": "

The endpoint type. Valid endpoint types include:

We strongly recommend that customers use the newer iot:Data-ATS endpoint type to avoid issues related to the widespread distrust of Symantec certificate authorities. ATS Signed Certificates are more secure and are trusted by most popular browsers.

" } }, "Environment": { @@ -4263,7 +4263,7 @@ "base": null, "refs": { "CreateJobRequest$documentSource": "

An S3 link, or S3 object URL, to the job document. The link is an Amazon S3 object URL and is required if you don't specify a value for document.

For example, --document-source https://s3.region-code.amazonaws.com/example-firmware/device-firmware.1.0

For more information, see Methods for accessing a bucket.

", - "CreateJobTemplateRequest$documentSource": "

An S3 link to the job document to use in the template. Required if you don't specify a value for document.

If the job document resides in an S3 bucket, you must use a placeholder link when specifying the document.

The placeholder link is of the following form:

${aws:iot:s3-presigned-url:https://s3.amazonaws.com/bucket/key}

where bucket is your bucket name and key is the object in the bucket to which you are linking.

", + "CreateJobTemplateRequest$documentSource": "

An S3 link, or S3 object URL, to the job document. The link is an Amazon S3 object URL and is required if you don't specify a value for document.

For example, --document-source https://s3.region-code.amazonaws.com/example-firmware/device-firmware.1.0

For more information, see Methods for accessing a bucket.

", "DescribeJobResponse$documentSource": "

An S3 link to the job document.

", "DescribeJobTemplateResponse$documentSource": "

An S3 link to the job document.

" } @@ -4458,6 +4458,30 @@ "Action$kafka": "

Send messages to an Amazon Managed Streaming for Apache Kafka (Amazon MSK) or self-managed Apache Kafka cluster.

" } }, + "KafkaActionHeader": { + "base": "

Specifies a Kafka header using key-value pairs when you create a Rule’s Kafka Action. You can use these headers to route data from IoT clients to downstream Kafka clusters without modifying your message payload.

For more information about Rule's Kafka action, see Apache Kafka.

", + "refs": { + "KafkaHeaders$member": null + } + }, + "KafkaHeaderKey": { + "base": null, + "refs": { + "KafkaActionHeader$key": "

The key of the Kafka header.

" + } + }, + "KafkaHeaderValue": { + "base": null, + "refs": { + "KafkaActionHeader$value": "

The value of the Kafka header.

" + } + }, + "KafkaHeaders": { + "base": null, + "refs": { + "KafkaAction$headers": "

The list of Kafka headers that you specify.

" + } + }, "Key": { "base": null, "refs": { @@ -5905,20 +5929,20 @@ "PackageName": { "base": null, "refs": { - "CreatePackageRequest$packageName": "

The name of the new package.

", - "CreatePackageResponse$packageName": "

The name of the package.

", - "CreatePackageVersionRequest$packageName": "

The name of the associated package.

", - "CreatePackageVersionResponse$packageName": "

The name of the associated package.

", - "DeletePackageRequest$packageName": "

The name of the target package.

", - "DeletePackageVersionRequest$packageName": "

The name of the associated package.

", - "GetPackageRequest$packageName": "

The name of the target package.

", - "GetPackageResponse$packageName": "

The name of the package.

", + "CreatePackageRequest$packageName": "

The name of the new software package.

", + "CreatePackageResponse$packageName": "

The name of the software package.

", + "CreatePackageVersionRequest$packageName": "

The name of the associated software package.

", + "CreatePackageVersionResponse$packageName": "

The name of the associated software package.

", + "DeletePackageRequest$packageName": "

The name of the target software package.

", + "DeletePackageVersionRequest$packageName": "

The name of the associated software package.

", + "GetPackageRequest$packageName": "

The name of the target software package.

", + "GetPackageResponse$packageName": "

The name of the software package.

", "GetPackageVersionRequest$packageName": "

The name of the associated package.

", - "GetPackageVersionResponse$packageName": "

The name of the package.

", - "ListPackageVersionsRequest$packageName": "

The name of the target package.

", - "PackageSummary$packageName": "

The name for the target package.

", + "GetPackageVersionResponse$packageName": "

The name of the software package.

", + "ListPackageVersionsRequest$packageName": "

The name of the target software package.

", + "PackageSummary$packageName": "

The name for the target software package.

", "PackageVersionSummary$packageName": "

The name of the associated software package.

", - "UpdatePackageRequest$packageName": "

The name of the target package.

", + "UpdatePackageRequest$packageName": "

The name of the target software package.

", "UpdatePackageVersionRequest$packageName": "

The name of the associated software package.

" } }, @@ -6767,7 +6791,7 @@ "CreatePackageVersionRequest$attributes": "

Metadata that can be used to define a package version’s configuration. For example, the S3 file location, configuration options that are being sent to the device or fleet.

The combined size of all the attributes on a package version is limited to 3KB.

", "CreatePackageVersionResponse$attributes": "

Metadata that were added to the package version that can be used to define a package version’s configuration.

", "GetPackageVersionResponse$attributes": "

Metadata that were added to the package version that can be used to define a package version’s configuration.

", - "UpdatePackageVersionRequest$attributes": "

Metadata that can be used to define a package version’s configuration. For example, the S3 file location, configuration options that are being sent to the device or fleet.

Note: Attributes can be updated only when the package version is in a draft state.

The combined size of all the attributes on a package version is limited to 3KB.

" + "UpdatePackageVersionRequest$attributes": "

Metadata that can be used to define a package version’s configuration. For example, the Amazon S3 file location, configuration options that are being sent to the device or fleet.

Note: Attributes can be updated only when the package version is in a draft state.

The combined size of all the attributes on a package version is limited to 3KB.

" } }, "ResourceDescription": { @@ -7623,8 +7647,8 @@ "base": null, "refs": { "ScheduledJobRollout$startTime": "

Displays the start times of the next seven maintenance window occurrences.

", - "SchedulingConfig$startTime": "

The time a job will begin rollout of the job document to all devices in the target group for a job. The startTime can be scheduled up to a year in advance and must be scheduled a minimum of thirty minutes from the current time. The date and time format for the startTime is YYYY-MM-DD for the date and HH:MM for the time.

", - "SchedulingConfig$endTime": "

The time a job will stop rollout of the job document to all devices in the target group for a job. The endTime must take place no later than two years from the current time and be scheduled a minimum of thirty minutes from the current time. The minimum duration between startTime and endTime is thirty minutes. The maximum duration between startTime and endTime is two years. The date and time format for the endTime is YYYY-MM-DD for the date and HH:MM for the time.

" + "SchedulingConfig$startTime": "

The time a job will begin rollout of the job document to all devices in the target group for a job. The startTime can be scheduled up to a year in advance and must be scheduled a minimum of thirty minutes from the current time. The date and time format for the startTime is YYYY-MM-DD for the date and HH:MM for the time.

For more information on the syntax for startTime when using an API command or the Command Line Interface, see Timestamp.

", + "SchedulingConfig$endTime": "

The time a job will stop rollout of the job document to all devices in the target group for a job. The endTime must take place no later than two years from the current time and be scheduled a minimum of thirty minutes from the current time. The minimum duration between startTime and endTime is thirty minutes. The maximum duration between startTime and endTime is two years. The date and time format for the endTime is YYYY-MM-DD for the date and HH:MM for the time.

For more information on the syntax for endTime when using an API command or the Command Line Interface, see Timestamp.

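As a hedged sketch (again outside the diff), scheduling a job rollout with these fields through aws-sdk-go. The thing-group ARN, document URL, and the exact way the date and time are joined into one string are assumptions; the docstring only fixes the YYYY-MM-DD and HH:MM parts.

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/iot"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := iot.New(sess)

	// startTime and endTime must be at least thirty minutes in the future and
	// no more than one year (startTime) / two years (endTime) out.
	_, err := svc.CreateJob(&iot.CreateJobInput{
		JobId:          aws.String("example-scheduled-rollout"),
		Targets:        []*string{aws.String("arn:aws:iot:us-east-1:123456789012:thinggroup/example-group")},
		DocumentSource: aws.String("https://s3.us-east-1.amazonaws.com/example-bucket/job-document.json"),
		SchedulingConfig: &iot.SchedulingConfig{
			StartTime: aws.String("2023-10-15 09:00"), // YYYY-MM-DD date, HH:MM time
			EndTime:   aws.String("2023-10-16 09:00"),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```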
" } }, "StringList": { diff --git a/models/apis/iot/2015-05-28/endpoint-rule-set-1.json b/models/apis/iot/2015-05-28/endpoint-rule-set-1.json index 6190c5d138b..64486d98236 100644 --- a/models/apis/iot/2015-05-28/endpoint-rule-set-1.json +++ b/models/apis/iot/2015-05-28/endpoint-rule-set-1.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,299 +140,250 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://iot-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://iot-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, - 
{ - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://iot-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsFIPS" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://iot-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://iot.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - "aws", - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "name" - ] - } - ] - } - ], + "conditions": [], "endpoint": { - "url": "https://iot.{Region}.amazonaws.com", + "url": "https://iot.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" - }, + } + ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + "aws", { - "conditions": [ + "fn": "getAttr", + "argv": [ { - "fn": "stringEquals", - "argv": [ - "aws-cn", - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "name" - ] - } - ] - } - ], - "endpoint": { - "url": "https://iot.{Region}.amazonaws.com.cn", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - }, + "ref": "PartitionResult" + }, + "name" + ] + } + ] + } + ], + "endpoint": { + "url": "https://iot.{Region}.amazonaws.com", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + "aws-cn", { - "conditions": [ + "fn": "getAttr", + "argv": [ { - "fn": "stringEquals", - "argv": [ - "aws-us-gov", - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "name" - ] - } - ] - } - ], - "endpoint": { - "url": "https://iot.{Region}.amazonaws.com", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - }, + "ref": "PartitionResult" + }, + "name" + ] + } + ] + } + ], + "endpoint": { + "url": "https://iot.{Region}.amazonaws.com.cn", + "properties": {}, + "headers": 
{} + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + "aws-us-gov", { - "conditions": [], - "endpoint": { - "url": "https://iot.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "name" + ] } ] } - ] + ], + "endpoint": { + "url": "https://iot.{Region}.amazonaws.com", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [], + "endpoint": { + "url": "https://iot.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/models/apis/textract/2018-06-27/api-2.json b/models/apis/textract/2018-06-27/api-2.json index 45eec36ec62..5cd056bbf36 100644 --- a/models/apis/textract/2018-06-27/api-2.json +++ b/models/apis/textract/2018-06-27/api-2.json @@ -394,7 +394,17 @@ "QUERY_RESULT", "SIGNATURE", "TABLE_TITLE", - "TABLE_FOOTER" + "TABLE_FOOTER", + "LAYOUT_TEXT", + "LAYOUT_TITLE", + "LAYOUT_HEADER", + "LAYOUT_FOOTER", + "LAYOUT_SECTION_HEADER", + "LAYOUT_PAGE_NUMBER", + "LAYOUT_LIST", + "LAYOUT_FIGURE", + "LAYOUT_TABLE", + "LAYOUT_KEY_VALUE" ] }, "BoundingBox":{ @@ -591,7 +601,8 @@ "TABLES", "FORMS", "QUERIES", - "SIGNATURES" + "SIGNATURES", + "LAYOUT" ] }, "FeatureTypes":{ diff --git a/models/apis/textract/2018-06-27/docs-2.json b/models/apis/textract/2018-06-27/docs-2.json index 360c97f7eb1..77ef1ad25ab 100644 --- a/models/apis/textract/2018-06-27/docs-2.json +++ b/models/apis/textract/2018-06-27/docs-2.json @@ -84,7 +84,7 @@ "BlockType": { "base": null, "refs": { - "Block$BlockType": "

The type of text item that's recognized. In operations for text detection, the following types are returned:

In text analysis operations, the following types are returned:

" + "Block$BlockType": "

The type of text item that's recognized. In operations for text detection, the following types are returned:

In text analysis operations, the following types are returned:

" } }, "BoundingBox": { @@ -288,7 +288,7 @@ "FeatureTypes": { "base": null, "refs": { - "AnalyzeDocumentRequest$FeatureTypes": "

A list of the types of analysis to perform. Add TABLES to the list to return information about the tables that are detected in the input document. Add FORMS to return detected form data. Add SIGNATURES to return the locations of detected signatures. To perform both forms and table analysis, add TABLES and FORMS to FeatureTypes. To detect signatures within form data and table data, add SIGNATURES to either TABLES or FORMS. All lines and words detected in the document are included in the response (including text that isn't related to the value of FeatureTypes).

", + "AnalyzeDocumentRequest$FeatureTypes": "

A list of the types of analysis to perform. Add TABLES to the list to return information about the tables that are detected in the input document. Add FORMS to return detected form data. Add SIGNATURES to return the locations of detected signatures. Add LAYOUT to the list to return information about the layout of the document. To perform both forms and table analysis, add TABLES and FORMS to FeatureTypes. To detect signatures within the document and within form data and table data, add SIGNATURES to either TABLES or FORMS. All lines and words detected in the document are included in the response (including text that isn't related to the value of FeatureTypes).

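To illustrate the new LAYOUT feature type (not part of this diff), a synchronous AnalyzeDocument call in aws-sdk-go. The file name is a placeholder, and the raw "LAYOUT"/"TABLES" strings are used instead of generated constants so the sketch does not depend on this SDK version.

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/textract"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := textract.New(sess)

	img, err := os.ReadFile("page.png") // hypothetical local scan
	if err != nil {
		log.Fatal(err)
	}

	// LAYOUT combines with the existing feature types; lines and words are
	// returned regardless of which features are requested.
	out, err := svc.AnalyzeDocument(&textract.AnalyzeDocumentInput{
		Document:     &textract.Document{Bytes: img},
		FeatureTypes: []*string{aws.String("LAYOUT"), aws.String("TABLES")},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("blocks returned:", len(out.Blocks))
}
```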
", "StartDocumentAnalysisRequest$FeatureTypes": "

A list of the types of analysis to perform. Add TABLES to the list to return information about the tables that are detected in the input document. Add FORMS to return detected form data. To perform both types of analysis, add TABLES and FORMS to FeatureTypes. All lines and words detected in the document are included in the response (including text that isn't related to the value of FeatureTypes).

" } }, @@ -952,7 +952,7 @@ "Block$ColumnIndex": "

The column in which a table cell appears. The first column position is 1. ColumnIndex isn't returned by DetectDocumentText and GetDocumentTextDetection.

", "Block$RowSpan": "

The number of rows that a table cell spans. RowSpan isn't returned by DetectDocumentText and GetDocumentTextDetection.

", "Block$ColumnSpan": "

The number of columns that a table cell spans. ColumnSpan isn't returned by DetectDocumentText and GetDocumentTextDetection.

", - "Block$Page": "

The page on which a block was detected. Page is returned by synchronous and asynchronous operations. Page values greater than 1 are only returned for multipage documents that are in PDF or TIFF format. A scanned image (JPEG/PNG) provided to an asynchronous operation, even if it contains multiple document pages, is considered a single-page document. This means that for scanned images the value of Page is always 1. Synchronous operations will also return a Page value of 1 because every input document is considered to be a single-page document.

", + "Block$Page": "

The page on which a block was detected. Page is returned by synchronous and asynchronous operations. Page values greater than 1 are only returned for multipage documents that are in PDF or TIFF format. A scanned image (JPEG/PNG) provided to an asynchronous operation, even if it contains multiple document pages, is considered a single-page document. This means that for scanned images the value of Page is always 1.

", "DetectedSignature$Page": "

The page a detected signature was found on.

", "DocumentMetadata$Pages": "

The number of pages that are detected in the document.

", "ExpenseDocument$ExpenseIndex": "

Denotes which invoice or receipt in the document the information is coming from. The first document will be 1, the second 2, and so on.

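Continuing the AnalyzeDocument sketch above (still illustrative, not from the diff), reading the Block fields this hunk documents; here `out` is the *textract.AnalyzeDocumentOutput from that call.

```go
for _, b := range out.Blocks {
	// For the synchronous call above, and for scanned JPEG/PNG input in general,
	// Page is always 1; only multipage PDF or TIFF input to the asynchronous
	// operations produces larger page numbers.
	fmt.Printf("%-22s page %d\n", aws.StringValue(b.BlockType), aws.Int64Value(b.Page))
}
```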
", diff --git a/models/apis/textract/2018-06-27/endpoint-rule-set-1.json b/models/apis/textract/2018-06-27/endpoint-rule-set-1.json index cf0040e78ad..5af848c1a2e 100644 --- a/models/apis/textract/2018-06-27/endpoint-rule-set-1.json +++ b/models/apis/textract/2018-06-27/endpoint-rule-set-1.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,224 +140,175 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://textract-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://textract-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ 
- true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://textract-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsFIPS" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://textract-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://textract.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://textract.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://textract.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://textract.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/models/apis/textract/2018-06-27/endpoint-tests-1.json b/models/apis/textract/2018-06-27/endpoint-tests-1.json index a711008edd0..ef404320720 100644 --- a/models/apis/textract/2018-06-27/endpoint-tests-1.json +++ b/models/apis/textract/2018-06-27/endpoint-tests-1.json @@ -390,6 +390,17 @@ "UseDualStack": true } }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, { "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", "expect": { @@ -403,6 +414,17 @@ "UseDualStack": false } }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": 
"DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, { "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", "expect": { @@ -416,6 +438,17 @@ "UseDualStack": false } }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, { "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", "expect": { @@ -429,6 +462,17 @@ "UseDualStack": false } }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, { "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", "expect": { @@ -492,6 +536,12 @@ "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/models/endpoints/endpoints.json b/models/endpoints/endpoints.json index 4e596321c16..cf157f6a2b7 100644 --- a/models/endpoints/endpoints.json +++ b/models/endpoints/endpoints.json @@ -1430,11 +1430,59 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "appflow-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "appflow-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "appflow-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "appflow-fips.us-west-2.amazonaws.com" + }, "sa-east-1" : { }, - "us-east-1" : { }, - "us-east-2" : { }, - "us-west-1" : { }, - "us-west-2" : { } + "us-east-1" : { + "variants" : [ { + "hostname" : "appflow-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "appflow-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "appflow-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "appflow-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } } }, "application-autoscaling" : { @@ -6218,13 +6266,25 @@ "ap-southeast-1" : { }, "ap-southeast-2" : { }, "ap-southeast-3" : { }, - "ca-central-1" : { }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "email-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, "eu-central-1" : { }, "eu-north-1" : { }, "eu-south-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "email-fips.ca-central-1.amazonaws.com" + }, "fips-us-east-1" : { "credentialScope" : { "region" : "us-east-1" @@ -6232,6 +6292,20 @@ "deprecated" 
: true, "hostname" : "email-fips.us-east-1.amazonaws.com" }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "email-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "email-fips.us-west-1.amazonaws.com" + }, "fips-us-west-2" : { "credentialScope" : { "region" : "us-west-2" @@ -6248,8 +6322,18 @@ "tags" : [ "fips" ] } ] }, - "us-east-2" : { }, - "us-west-1" : { }, + "us-east-2" : { + "variants" : [ { + "hostname" : "email-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "email-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, "us-west-2" : { "variants" : [ { "hostname" : "email-fips.us-west-2.amazonaws.com", @@ -10875,8 +10959,14 @@ }, "meetings-chime" : { "endpoints" : { + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { }, "eu-central-1" : { }, + "eu-west-2" : { }, "il-central-1" : { }, "us-east-1" : { "variants" : [ { @@ -11831,6 +11921,12 @@ "deprecated" : true, "hostname" : "omics-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { + "credentialScope" : { + "region" : "il-central-1" + }, + "hostname" : "omics.il-central-1.amazonaws.com" + }, "us-east-1" : { "credentialScope" : { "region" : "us-east-1" diff --git a/service/cognitoidentityprovider/api.go b/service/cognitoidentityprovider/api.go index a3088577b40..f645366c62c 100644 --- a/service/cognitoidentityprovider/api.go +++ b/service/cognitoidentityprovider/api.go @@ -13450,9 +13450,19 @@ type AdminCreateUserInput struct { // UserPoolId is a required field UserPoolId *string `min:"1" type:"string" required:"true"` - // The username for the user. Must be unique within the user pool. Must be a - // UTF-8 string between 1 and 128 characters. After the user is created, the - // username can't be changed. + // The value that you want to set as the username sign-in attribute. The following + // conditions apply to the username parameter. + // + // * The username can't be a duplicate of another username in the same user + // pool. + // + // * You can't change the value of a username after you create it. + // + // * You can only provide a value if usernames are a valid sign-in attribute + // for your user pool. If your user pool only supports phone numbers or email + // addresses as sign-in attributes, Amazon Cognito automatically generates + // a username value. For more information, see Customizing sign-in attributes + // (https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-settings-attributes.html#user-pool-settings-aliases). // // Username is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by AdminCreateUserInput's @@ -34160,7 +34170,9 @@ type UserPoolDescriptionType struct { Name *string `min:"1" type:"string"` // The user pool status in a user pool description. - Status *string `type:"string" enum:"StatusType"` + // + // Deprecated: This property is no longer available. + Status *string `deprecated:"true" type:"string" enum:"StatusType"` } // String returns the string representation. @@ -34476,7 +34488,9 @@ type UserPoolType struct { SmsVerificationMessage *string `min:"6" type:"string"` // The status of a user pool. - Status *string `type:"string" enum:"StatusType"` + // + // Deprecated: This property is no longer available. 
+ Status *string `deprecated:"true" type:"string" enum:"StatusType"` // The settings for updates to user attributes. These settings include the property // AttributesRequireVerificationBeforeUpdate, a user-pool setting that tells diff --git a/service/cognitoidentityprovider/examples_test.go b/service/cognitoidentityprovider/examples_test.go index de0500443bf..07667e3cc03 100644 --- a/service/cognitoidentityprovider/examples_test.go +++ b/service/cognitoidentityprovider/examples_test.go @@ -474,47 +474,3 @@ func ExampleCognitoIdentityProvider_ListUsers_shared00() { fmt.Println(result) } - -// A ListUsers request for the next 3 users whose email address starts with "testuser." -// This request submits a value for all possible parameters for ListUsers. By iterating -// the PaginationToken, you can page through and collect all users in a user pool. -func ExampleCognitoIdentityProvider_ListUsers_shared01() { - svc := cognitoidentityprovider.New(session.New()) - input := &cognitoidentityprovider.ListUsersInput{ - AttributesToGet: []*string{ - aws.String("email"), - aws.String("sub"), - }, - Filter: aws.String("\"email\"^=\"testuser\""), - Limit: aws.Int64(3), - PaginationToken: aws.String("abcd1234EXAMPLE"), - UserPoolId: aws.String("us-east-1_EXAMPLE"), - } - - result, err := svc.ListUsers(input) - if err != nil { - if aerr, ok := err.(awserr.Error); ok { - switch aerr.Code() { - case cognitoidentityprovider.ErrCodeInvalidParameterException: - fmt.Println(cognitoidentityprovider.ErrCodeInvalidParameterException, aerr.Error()) - case cognitoidentityprovider.ErrCodeResourceNotFoundException: - fmt.Println(cognitoidentityprovider.ErrCodeResourceNotFoundException, aerr.Error()) - case cognitoidentityprovider.ErrCodeTooManyRequestsException: - fmt.Println(cognitoidentityprovider.ErrCodeTooManyRequestsException, aerr.Error()) - case cognitoidentityprovider.ErrCodeNotAuthorizedException: - fmt.Println(cognitoidentityprovider.ErrCodeNotAuthorizedException, aerr.Error()) - case cognitoidentityprovider.ErrCodeInternalErrorException: - fmt.Println(cognitoidentityprovider.ErrCodeInternalErrorException, aerr.Error()) - default: - fmt.Println(aerr.Error()) - } - } else { - // Print the error, cast err to awserr.Error to get the Code and - // Message from an error. - fmt.Println(err.Error()) - } - return - } - - fmt.Println(result) -} diff --git a/service/firehose/api.go b/service/firehose/api.go index 8f3b930d70a..d11f38a7596 100644 --- a/service/firehose/api.go +++ b/service/firehose/api.go @@ -572,6 +572,12 @@ func (c *Firehose) PutRecordRequest(input *PutRecordInput) (req *request.Request // for each delivery stream. For more information about limits and how to request // an increase, see Amazon Kinesis Data Firehose Limits (https://docs.aws.amazon.com/firehose/latest/dev/limits.html). // +// Kinesis Data Firehose accumulates and publishes a particular metric for a +// customer account in one minute intervals. It is possible that the bursts +// of incoming bytes/records ingested to a delivery stream last only for a few +// seconds. Due to this, the actual spikes in the traffic might not be fully +// visible in the customer's 1 minute CloudWatch metrics. +// // You must specify the name of the delivery stream and the data record when // using PutRecord. The data record consists of a data blob that can be up to // 1,000 KiB in size, and any kind of data. For example, it can be a segment @@ -588,9 +594,14 @@ func (c *Firehose) PutRecordRequest(input *PutRecordInput) (req *request.Request // to each record. 
Producer applications can use this ID for purposes such as // auditability and investigation. // -// If the PutRecord operation throws a ServiceUnavailableException, back off -// and retry. If the exception persists, it is possible that the throughput -// limits have been exceeded for the delivery stream. +// If the PutRecord operation throws a ServiceUnavailableException, the API +// is automatically reinvoked (retried) 3 times. If the exception persists, +// it is possible that the throughput limits have been exceeded for the delivery +// stream. +// +// Re-invoking the Put API operations (for example, PutRecord and PutRecordBatch) +// can result in data duplicates. For larger data assets, allow for a longer +// time out before retrying Put API operations. // // Data records sent to Kinesis Data Firehose are stored for 24 hours from the // time they are added to a delivery stream as it tries to send the records @@ -697,6 +708,12 @@ func (c *Firehose) PutRecordBatchRequest(input *PutRecordBatchInput) (req *reque // To write single data records into a delivery stream, use PutRecord. Applications // using these operations are referred to as producers. // +// Kinesis Data Firehose accumulates and publishes a particular metric for a +// customer account in one minute intervals. It is possible that the bursts +// of incoming bytes/records ingested to a delivery stream last only for a few +// seconds. Due to this, the actual spikes in the traffic might not be fully +// visible in the customer's 1 minute CloudWatch metrics. +// // For information about service quota, see Amazon Kinesis Data Firehose Quota // (https://docs.aws.amazon.com/firehose/latest/dev/limits.html). // @@ -741,9 +758,13 @@ func (c *Firehose) PutRecordBatchRequest(input *PutRecordBatchInput) (req *reque // corresponding charges). We recommend that you handle any duplicates at the // destination. // -// If PutRecordBatch throws ServiceUnavailableException, back off and retry. -// If the exception persists, it is possible that the throughput limits have -// been exceeded for the delivery stream. +// If PutRecordBatch throws ServiceUnavailableException, the API is automatically +// reinvoked (retried) 3 times. If the exception persists, it is possible that +// the throughput limits have been exceeded for the delivery stream. +// +// Re-invoking the Put API operations (for example, PutRecord and PutRecordBatch) +// can result in data duplicates. For larger data assets, allow for a longer +// time out before retrying Put API operations. // // Data records sent to Kinesis Data Firehose are stored for 24 hours from the // time they are added to a delivery stream as it attempts to send the records @@ -870,6 +891,10 @@ func (c *Firehose) StartDeliveryStreamEncryptionRequest(input *StartDeliveryStre // Kinesis Data Firehose creates a grant that enables it to use the new CMK // to encrypt and decrypt data and to manage the grant. // +// For the KMS grant creation to be successful, Kinesis Data Firehose APIs StartDeliveryStreamEncryption +// and CreateDeliveryStream should not be called with session credentials that +// are more than 6 hours old. +// // If a delivery stream already has encryption enabled and then you invoke this // operation to change the ARN of the CMK or both its type and ARN and you get // ENABLING_FAILED, this only means that the attempt to change the CMK failed. 
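To make the revised PutRecord guidance concrete, a hedged producer snippet (not part of the diff); the stream name and payload are made up. Because ServiceUnavailableException is now retried automatically, the caller mainly decides whether another manual retry is worth the risk of duplicate records.

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/firehose"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := firehose.New(sess)

	_, err := svc.PutRecord(&firehose.PutRecordInput{
		DeliveryStreamName: aws.String("example-stream"),
		Record:             &firehose.Record{Data: []byte(`{"event":"ping"}` + "\n")},
	})
	if err != nil {
		// If ServiceUnavailableException still surfaces after the automatic
		// retries, the stream's throughput limits have probably been exceeded;
		// retrying again by hand can create duplicates downstream.
		if aerr, ok := err.(awserr.Error); ok && aerr.Code() == firehose.ErrCodeServiceUnavailableException {
			log.Printf("throttled after retries: %v", aerr)
			return
		}
		log.Fatal(err)
	}
}
```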
@@ -2572,6 +2597,70 @@ func (s *AmazonopensearchserviceRetryOptions) SetDurationInSeconds(v int64) *Ama return s } +// The authentication configuration of the Amazon MSK cluster. +type AuthenticationConfiguration struct { + _ struct{} `type:"structure"` + + // The type of connectivity used to access the Amazon MSK cluster. + // + // Connectivity is a required field + Connectivity *string `type:"string" required:"true" enum:"Connectivity"` + + // The ARN of the role used to access the Amazon MSK cluster. + // + // RoleARN is a required field + RoleARN *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AuthenticationConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AuthenticationConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AuthenticationConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AuthenticationConfiguration"} + if s.Connectivity == nil { + invalidParams.Add(request.NewErrParamRequired("Connectivity")) + } + if s.RoleARN == nil { + invalidParams.Add(request.NewErrParamRequired("RoleARN")) + } + if s.RoleARN != nil && len(*s.RoleARN) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RoleARN", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetConnectivity sets the Connectivity field's value. +func (s *AuthenticationConfiguration) SetConnectivity(v string) *AuthenticationConfiguration { + s.Connectivity = &v + return s +} + +// SetRoleARN sets the RoleARN field's value. +func (s *AuthenticationConfiguration) SetRoleARN(v string) *AuthenticationConfiguration { + s.RoleARN = &v + return s +} + // Describes hints for the buffering to perform before delivering data to the // destination. These options are treated as hints, and therefore Kinesis Data // Firehose might choose to use different values when it is optimal. The SizeInMBs @@ -2894,6 +2983,10 @@ type CreateDeliveryStreamInput struct { // Resource Name (ARN) and the role ARN for the source stream. KinesisStreamSourceConfiguration *KinesisStreamSourceConfiguration `type:"structure"` + // The configuration for the Amazon MSK cluster to be used as the source for + // a delivery stream. + MSKSourceConfiguration *MSKSourceConfiguration `type:"structure"` + // The destination in Amazon Redshift. You can specify only one destination. 
RedshiftDestinationConfiguration *RedshiftDestinationConfiguration `type:"structure"` @@ -2981,6 +3074,11 @@ func (s *CreateDeliveryStreamInput) Validate() error { invalidParams.AddNested("KinesisStreamSourceConfiguration", err.(request.ErrInvalidParams)) } } + if s.MSKSourceConfiguration != nil { + if err := s.MSKSourceConfiguration.Validate(); err != nil { + invalidParams.AddNested("MSKSourceConfiguration", err.(request.ErrInvalidParams)) + } + } if s.RedshiftDestinationConfiguration != nil { if err := s.RedshiftDestinationConfiguration.Validate(); err != nil { invalidParams.AddNested("RedshiftDestinationConfiguration", err.(request.ErrInvalidParams)) @@ -3067,6 +3165,12 @@ func (s *CreateDeliveryStreamInput) SetKinesisStreamSourceConfiguration(v *Kines return s } +// SetMSKSourceConfiguration sets the MSKSourceConfiguration field's value. +func (s *CreateDeliveryStreamInput) SetMSKSourceConfiguration(v *MSKSourceConfiguration) *CreateDeliveryStreamInput { + s.MSKSourceConfiguration = v + return s +} + // SetRedshiftDestinationConfiguration sets the RedshiftDestinationConfiguration field's value. func (s *CreateDeliveryStreamInput) SetRedshiftDestinationConfiguration(v *RedshiftDestinationConfiguration) *CreateDeliveryStreamInput { s.RedshiftDestinationConfiguration = v @@ -6858,6 +6962,154 @@ func (s *ListTagsForDeliveryStreamOutput) SetTags(v []*Tag) *ListTagsForDelivery return s } +// The configuration for the Amazon MSK cluster to be used as the source for +// a delivery stream. +type MSKSourceConfiguration struct { + _ struct{} `type:"structure"` + + // The authentication configuration of the Amazon MSK cluster. + // + // AuthenticationConfiguration is a required field + AuthenticationConfiguration *AuthenticationConfiguration `type:"structure" required:"true"` + + // The ARN of the Amazon MSK cluster. + // + // MSKClusterARN is a required field + MSKClusterARN *string `min:"1" type:"string" required:"true"` + + // The topic name within the Amazon MSK cluster. + // + // TopicName is a required field + TopicName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s MSKSourceConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s MSKSourceConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *MSKSourceConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MSKSourceConfiguration"} + if s.AuthenticationConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("AuthenticationConfiguration")) + } + if s.MSKClusterARN == nil { + invalidParams.Add(request.NewErrParamRequired("MSKClusterARN")) + } + if s.MSKClusterARN != nil && len(*s.MSKClusterARN) < 1 { + invalidParams.Add(request.NewErrParamMinLen("MSKClusterARN", 1)) + } + if s.TopicName == nil { + invalidParams.Add(request.NewErrParamRequired("TopicName")) + } + if s.TopicName != nil && len(*s.TopicName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TopicName", 1)) + } + if s.AuthenticationConfiguration != nil { + if err := s.AuthenticationConfiguration.Validate(); err != nil { + invalidParams.AddNested("AuthenticationConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAuthenticationConfiguration sets the AuthenticationConfiguration field's value. +func (s *MSKSourceConfiguration) SetAuthenticationConfiguration(v *AuthenticationConfiguration) *MSKSourceConfiguration { + s.AuthenticationConfiguration = v + return s +} + +// SetMSKClusterARN sets the MSKClusterARN field's value. +func (s *MSKSourceConfiguration) SetMSKClusterARN(v string) *MSKSourceConfiguration { + s.MSKClusterARN = &v + return s +} + +// SetTopicName sets the TopicName field's value. +func (s *MSKSourceConfiguration) SetTopicName(v string) *MSKSourceConfiguration { + s.TopicName = &v + return s +} + +// Details about the Amazon MSK cluster used as the source for a Kinesis Data +// Firehose delivery stream. +type MSKSourceDescription struct { + _ struct{} `type:"structure"` + + // The authentication configuration of the Amazon MSK cluster. + AuthenticationConfiguration *AuthenticationConfiguration `type:"structure"` + + // Kinesis Data Firehose starts retrieving records from the topic within the + // Amazon MSK cluster starting with this timestamp. + DeliveryStartTimestamp *time.Time `type:"timestamp"` + + // The ARN of the Amazon MSK cluster. + MSKClusterARN *string `min:"1" type:"string"` + + // The topic name within the Amazon MSK cluster. + TopicName *string `min:"1" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s MSKSourceDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s MSKSourceDescription) GoString() string { + return s.String() +} + +// SetAuthenticationConfiguration sets the AuthenticationConfiguration field's value. +func (s *MSKSourceDescription) SetAuthenticationConfiguration(v *AuthenticationConfiguration) *MSKSourceDescription { + s.AuthenticationConfiguration = v + return s +} + +// SetDeliveryStartTimestamp sets the DeliveryStartTimestamp field's value. +func (s *MSKSourceDescription) SetDeliveryStartTimestamp(v time.Time) *MSKSourceDescription { + s.DeliveryStartTimestamp = &v + return s +} + +// SetMSKClusterARN sets the MSKClusterARN field's value. 
+func (s *MSKSourceDescription) SetMSKClusterARN(v string) *MSKSourceDescription { + s.MSKClusterARN = &v + return s +} + +// SetTopicName sets the TopicName field's value. +func (s *MSKSourceDescription) SetTopicName(v string) *MSKSourceDescription { + s.TopicName = &v + return s +} + // The OpenX SerDe. Used by Kinesis Data Firehose for deserializing data, which // means converting it from the JSON format in preparation for serializing it // to the Parquet or ORC format. This is one of two deserializers you can choose, @@ -9160,6 +9412,10 @@ type SourceDescription struct { // The KinesisStreamSourceDescription value for the source Kinesis data stream. KinesisStreamSourceDescription *KinesisStreamSourceDescription `type:"structure"` + + // The configuration description for the Amazon MSK cluster to be used as the + // source for a delivery stream. + MSKSourceDescription *MSKSourceDescription `type:"structure"` } // String returns the string representation. @@ -9186,6 +9442,12 @@ func (s *SourceDescription) SetKinesisStreamSourceDescription(v *KinesisStreamSo return s } +// SetMSKSourceDescription sets the MSKSourceDescription field's value. +func (s *SourceDescription) SetMSKSourceDescription(v *MSKSourceDescription) *SourceDescription { + s.MSKSourceDescription = v + return s +} + // Describes the configuration of a destination in Splunk. type SplunkDestinationConfiguration struct { _ struct{} `type:"structure"` @@ -10604,6 +10866,22 @@ func CompressionFormat_Values() []string { } } +const ( + // ConnectivityPublic is a Connectivity enum value + ConnectivityPublic = "PUBLIC" + + // ConnectivityPrivate is a Connectivity enum value + ConnectivityPrivate = "PRIVATE" +) + +// Connectivity_Values returns all elements of the Connectivity enum +func Connectivity_Values() []string { + return []string{ + ConnectivityPublic, + ConnectivityPrivate, + } +} + const ( // ContentEncodingNone is a ContentEncoding enum value ContentEncodingNone = "NONE" @@ -10770,6 +11048,9 @@ const ( // DeliveryStreamTypeKinesisStreamAsSource is a DeliveryStreamType enum value DeliveryStreamTypeKinesisStreamAsSource = "KinesisStreamAsSource" + + // DeliveryStreamTypeMskasSource is a DeliveryStreamType enum value + DeliveryStreamTypeMskasSource = "MSKAsSource" ) // DeliveryStreamType_Values returns all elements of the DeliveryStreamType enum @@ -10777,6 +11058,7 @@ func DeliveryStreamType_Values() []string { return []string{ DeliveryStreamTypeDirectPut, DeliveryStreamTypeKinesisStreamAsSource, + DeliveryStreamTypeMskasSource, } } @@ -10983,6 +11265,9 @@ const ( // ProcessorParameterNameDelimiter is a ProcessorParameterName enum value ProcessorParameterNameDelimiter = "Delimiter" + + // ProcessorParameterNameCompressionFormat is a ProcessorParameterName enum value + ProcessorParameterNameCompressionFormat = "CompressionFormat" ) // ProcessorParameterName_Values returns all elements of the ProcessorParameterName enum @@ -10997,6 +11282,7 @@ func ProcessorParameterName_Values() []string { ProcessorParameterNameBufferIntervalInSeconds, ProcessorParameterNameSubRecordType, ProcessorParameterNameDelimiter, + ProcessorParameterNameCompressionFormat, } } @@ -11004,6 +11290,9 @@ const ( // ProcessorTypeRecordDeAggregation is a ProcessorType enum value ProcessorTypeRecordDeAggregation = "RecordDeAggregation" + // ProcessorTypeDecompression is a ProcessorType enum value + ProcessorTypeDecompression = "Decompression" + // ProcessorTypeLambda is a ProcessorType enum value ProcessorTypeLambda = "Lambda" @@ -11018,6 +11307,7 @@ const ( 
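Putting the new MSK source types together (illustrative only; every name and ARN below is a placeholder), a CreateDeliveryStream call that reads from an Amazon MSK topic and lands in S3. The MSK fields come from the structs added in this diff; the S3 destination fields are the long-standing ones.

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/firehose"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := firehose.New(sess)

	_, err := svc.CreateDeliveryStream(&firehose.CreateDeliveryStreamInput{
		DeliveryStreamName: aws.String("example-msk-to-s3"),
		DeliveryStreamType: aws.String(firehose.DeliveryStreamTypeMskasSource),
		MSKSourceConfiguration: &firehose.MSKSourceConfiguration{
			MSKClusterARN: aws.String("arn:aws:kafka:us-east-1:123456789012:cluster/example/abc-123"),
			TopicName:     aws.String("telemetry"),
			AuthenticationConfiguration: &firehose.AuthenticationConfiguration{
				Connectivity: aws.String(firehose.ConnectivityPrivate),
				RoleARN:      aws.String("arn:aws:iam::123456789012:role/firehose-msk-source"),
			},
		},
		ExtendedS3DestinationConfiguration: &firehose.ExtendedS3DestinationConfiguration{
			RoleARN:   aws.String("arn:aws:iam::123456789012:role/firehose-s3-delivery"),
			BucketARN: aws.String("arn:aws:s3:::example-destination-bucket"),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```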
func ProcessorType_Values() []string { return []string{ ProcessorTypeRecordDeAggregation, + ProcessorTypeDecompression, ProcessorTypeLambda, ProcessorTypeMetadataExtraction, ProcessorTypeAppendDelimiterToRecord, diff --git a/service/iot/api.go b/service/iot/api.go index 472aef686de..ec893f741c5 100644 --- a/service/iot/api.go +++ b/service/iot/api.go @@ -1859,9 +1859,9 @@ func (c *IoT) CreateCertificateFromCsrRequest(input *CreateCertificateFromCsrInp // action. // // The CSR must include a public key that is either an RSA key with a length -// of at least 2048 bits or an ECC key from NIST P-256 or NIST P-384 curves. -// For supported certificates, consult Certificate signing algorithms supported -// by IoT (https://docs.aws.amazon.com/iot/latest/developerguide/x509-client-certs.html#x509-cert-algorithms). +// of at least 2048 bits or an ECC key from NIST P-256, NIST P-384, or NIST +// P-521 curves. For supported certificates, consult Certificate signing algorithms +// supported by IoT (https://docs.aws.amazon.com/iot/latest/developerguide/x509-client-certs.html#x509-cert-algorithms). // // Reusing the same certificate signing request (CSR) results in a distinct // certificate. @@ -6040,7 +6040,7 @@ func (c *IoT) DeletePackageVersionRequest(input *DeletePackageVersionInput) (req // Deletes a specific version from a software package. // // Note: If a package version is designated as default, you must remove the -// designation from the package using the UpdatePackage action. +// designation from the software package using the UpdatePackage action. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -25280,7 +25280,7 @@ func (c *IoT) UpdatePackageRequest(input *UpdatePackageInput) (req *request.Requ // UpdatePackage API operation for AWS IoT. // -// Updates the supported fields for a specific package. +// Updates the supported fields for a specific software package. // // Requires permission to access the UpdatePackage (https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsiot.html#awsiot-actions-as-permissions) // and GetIndexingConfiguration (https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsiot.html#awsiot-actions-as-permissions) @@ -25370,7 +25370,7 @@ func (c *IoT) UpdatePackageConfigurationRequest(input *UpdatePackageConfiguratio // UpdatePackageConfiguration API operation for AWS IoT. // -// Updates the package configuration. +// Updates the software package configuration. // // Requires permission to access the UpdatePackageConfiguration (https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsiot.html#awsiot-actions-as-permissions) // and iam:PassRole (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_passrole.html) @@ -29753,6 +29753,9 @@ type Behavior struct { // The criteria that determine if a device is behaving normally in regard to // the metric. + // + // In the IoT console, you can choose to be sent an alert through Amazon SNS + // when IoT Device Defender detects that a device is behaving anomalously. Criteria *BehaviorCriteria `locationName:"criteria" type:"structure"` // What is measured by the behavior. @@ -33942,18 +33945,12 @@ type CreateJobTemplateInput struct { // The job document. Required if you don't specify a value for documentSource. Document *string `locationName:"document" type:"string"` - // An S3 link to the job document to use in the template. 
Required if you don't - // specify a value for document. - // - // If the job document resides in an S3 bucket, you must use a placeholder link - // when specifying the document. - // - // The placeholder link is of the following form: + // An S3 link, or S3 object URL, to the job document. The link is an Amazon + // S3 object URL and is required if you don't specify a value for document. // - // ${aws:iot:s3-presigned-url:https://s3.amazonaws.com/bucket/key} + // For example, --document-source https://s3.region-code.amazonaws.com/example-firmware/device-firmware.1.0 // - // where bucket is your bucket name and key is the object in the bucket to which - // you are linking. + // For more information, see Methods for accessing a bucket (https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-bucket-intro.html). DocumentSource *string `locationName:"documentSource" min:"1" type:"string"` // The ARN of the job to use as the basis for the job template. @@ -34429,7 +34426,8 @@ func (s *CreateMitigationActionOutput) SetActionId(v string) *CreateMitigationAc type CreateOTAUpdateInput struct { _ struct{} `type:"structure"` - // A list of additional OTA update parameters which are name-value pairs. + // A list of additional OTA update parameters, which are name-value pairs. They + // won't be sent to devices as a part of the Job document. AdditionalParameters map[string]*string `locationName:"additionalParameters" type:"map"` // The criteria that determine when and how a job abort takes place. @@ -34735,7 +34733,7 @@ type CreatePackageInput struct { // String and GoString methods. Description *string `locationName:"description" type:"string" sensitive:"true"` - // The name of the new package. + // The name of the new software package. // // PackageName is a required field PackageName *string `location:"uri" locationName:"packageName" min:"1" type:"string" required:"true"` @@ -34821,7 +34819,7 @@ type CreatePackageOutput struct { // The Amazon Resource Name (ARN) for the package. PackageArn *string `locationName:"packageArn" type:"string"` - // The name of the package. + // The name of the software package. PackageName *string `locationName:"packageName" min:"1" type:"string"` } @@ -34889,7 +34887,7 @@ type CreatePackageVersionInput struct { // String and GoString methods. Description *string `locationName:"description" type:"string" sensitive:"true"` - // The name of the associated package. + // The name of the associated software package. // // PackageName is a required field PackageName *string `location:"uri" locationName:"packageName" min:"1" type:"string" required:"true"` @@ -35006,7 +35004,7 @@ type CreatePackageVersionOutput struct { // Error reason for a package version failure during creation or update. ErrorReason *string `locationName:"errorReason" type:"string"` - // The name of the associated package. + // The name of the associated software package. PackageName *string `locationName:"packageName" min:"1" type:"string"` // The Amazon Resource Name (ARN) for the package. @@ -38522,7 +38520,7 @@ type DeletePackageInput struct { // is required. ClientToken *string `location:"querystring" locationName:"clientToken" min:"36" type:"string" idempotencyToken:"true"` - // The name of the target package. + // The name of the target software package. // // PackageName is a required field PackageName *string `location:"uri" locationName:"packageName" min:"1" type:"string" required:"true"` @@ -38607,7 +38605,7 @@ type DeletePackageVersionInput struct { // is required. 
ClientToken *string `location:"querystring" locationName:"clientToken" min:"36" type:"string" idempotencyToken:"true"` - // The name of the associated package. + // The name of the associated software package. // // PackageName is a required field PackageName *string `location:"uri" locationName:"packageName" min:"1" type:"string" required:"true"` @@ -41451,7 +41449,8 @@ type DescribeEndpointInput struct { // // We strongly recommend that customers use the newer iot:Data-ATS endpoint // type to avoid issues related to the widespread distrust of Symantec certificate - // authorities. + // authorities. ATS Signed Certificates are more secure and are trusted by most + // popular browsers. EndpointType *string `location:"querystring" locationName:"endpointType" type:"string"` } @@ -46625,7 +46624,7 @@ func (s *GetPackageConfigurationOutput) SetVersionUpdateByJobsConfig(v *VersionU type GetPackageInput struct { _ struct{} `type:"structure" nopayload:"true"` - // The name of the target package. + // The name of the target software package. // // PackageName is a required field PackageName *string `location:"uri" locationName:"packageName" min:"1" type:"string" required:"true"` @@ -46693,7 +46692,7 @@ type GetPackageOutput struct { // The ARN for the package. PackageArn *string `locationName:"packageArn" type:"string"` - // The name of the package. + // The name of the software package. PackageName *string `locationName:"packageName" min:"1" type:"string"` } @@ -46844,7 +46843,7 @@ type GetPackageVersionOutput struct { // The date when the package version was last updated. LastModifiedDate *time.Time `locationName:"lastModifiedDate" type:"timestamp"` - // The name of the package. + // The name of the software package. PackageName *string `locationName:"packageName" min:"1" type:"string"` // The ARN for the package version. @@ -50155,6 +50154,9 @@ type KafkaAction struct { // DestinationArn is a required field DestinationArn *string `locationName:"destinationArn" type:"string" required:"true"` + // The list of Kafka headers that you specify. + Headers []*KafkaActionHeader `locationName:"headers" min:"1" type:"list"` + // The Kafka message key. Key *string `locationName:"key" type:"string"` @@ -50194,9 +50196,22 @@ func (s *KafkaAction) Validate() error { if s.DestinationArn == nil { invalidParams.Add(request.NewErrParamRequired("DestinationArn")) } + if s.Headers != nil && len(s.Headers) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Headers", 1)) + } if s.Topic == nil { invalidParams.Add(request.NewErrParamRequired("Topic")) } + if s.Headers != nil { + for i, v := range s.Headers { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Headers", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -50216,6 +50231,12 @@ func (s *KafkaAction) SetDestinationArn(v string) *KafkaAction { return s } +// SetHeaders sets the Headers field's value. +func (s *KafkaAction) SetHeaders(v []*KafkaActionHeader) *KafkaAction { + s.Headers = v + return s +} + // SetKey sets the Key field's value. func (s *KafkaAction) SetKey(v string) *KafkaAction { s.Key = &v @@ -50234,6 +50255,71 @@ func (s *KafkaAction) SetTopic(v string) *KafkaAction { return s } +// Specifies a Kafka header using key-value pairs when you create a Rule’s +// Kafka Action. You can use these headers to route data from IoT clients to +// downstream Kafka clusters without modifying your message payload. 
+// +// For more information about Rule's Kafka action, see Apache Kafka (https://docs.aws.amazon.com/iot/latest/developerguide/apache-kafka-rule-action.html). +type KafkaActionHeader struct { + _ struct{} `type:"structure"` + + // The key of the Kafka header. + // + // Key is a required field + Key *string `locationName:"key" type:"string" required:"true"` + + // The value of the Kafka header. + // + // Value is a required field + Value *string `locationName:"value" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s KafkaActionHeader) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s KafkaActionHeader) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *KafkaActionHeader) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "KafkaActionHeader"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. +func (s *KafkaActionHeader) SetKey(v string) *KafkaActionHeader { + s.Key = &v + return s +} + +// SetValue sets the Value field's value. +func (s *KafkaActionHeader) SetValue(v string) *KafkaActionHeader { + s.Value = &v + return s +} + // Describes a key pair. type KeyPair struct { _ struct{} `type:"structure"` @@ -53907,7 +53993,7 @@ type ListPackageVersionsInput struct { // The token for the next set of results. NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` - // The name of the target package. + // The name of the target software package. // // PackageName is a required field PackageName *string `location:"uri" locationName:"packageName" min:"1" type:"string" required:"true"` @@ -58784,7 +58870,8 @@ func (s *NotConfiguredException) RequestID() string { type OTAUpdateFile struct { _ struct{} `type:"structure"` - // A list of name/attribute pairs. + // A list of name-attribute pairs. They won't be sent to devices as a part of + // the Job document. Attributes map[string]*string `locationName:"attributes" type:"map"` // The code signing method of the file. @@ -59296,7 +59383,7 @@ type PackageSummary struct { // The date that the package was last updated. LastModifiedDate *time.Time `locationName:"lastModifiedDate" type:"timestamp"` - // The name for the target package. + // The name for the target software package. PackageName *string `locationName:"packageName" min:"1" type:"string"` } @@ -62292,6 +62379,9 @@ type SchedulingConfig struct { // minutes. The maximum duration between startTime and endTime is two years. // The date and time format for the endTime is YYYY-MM-DD for the date and HH:MM // for the time. 
+ // + // For more information on the syntax for endTime when using an API command + // or the Command Line Interface, see Timestamp (https://docs.aws.amazon.com/cli/latest/userguide/cli-usage-parameters-types.html#parameter-type-timestamp). EndTime *string `locationName:"endTime" min:"1" type:"string"` // An optional configuration within the SchedulingConfig to setup a recurring @@ -62304,6 +62394,9 @@ type SchedulingConfig struct { // and must be scheduled a minimum of thirty minutes from the current time. // The date and time format for the startTime is YYYY-MM-DD for the date and // HH:MM for the time. + // + // For more information on the syntax for startTime when using an API command + // or the Command Line Interface, see Timestamp (https://docs.aws.amazon.com/cli/latest/userguide/cli-usage-parameters-types.html#parameter-type-timestamp). StartTime *string `locationName:"startTime" min:"1" type:"string"` } @@ -69667,7 +69760,7 @@ type UpdatePackageInput struct { // String and GoString methods. Description *string `locationName:"description" type:"string" sensitive:"true"` - // The name of the target package. + // The name of the target software package. // // PackageName is a required field PackageName *string `location:"uri" locationName:"packageName" min:"1" type:"string" required:"true"` @@ -69780,8 +69873,8 @@ type UpdatePackageVersionInput struct { Action *string `locationName:"action" type:"string" enum:"PackageVersionAction"` // Metadata that can be used to define a package version’s configuration. - // For example, the S3 file location, configuration options that are being sent - // to the device or fleet. + // For example, the Amazon S3 file location, configuration options that are + // being sent to the device or fleet. // // Note: Attributes can be updated only when the package version is in a draft // state. @@ -73181,6 +73274,12 @@ const ( // LogTargetTypePrincipalId is a LogTargetType enum value LogTargetTypePrincipalId = "PRINCIPAL_ID" + + // LogTargetTypeEventType is a LogTargetType enum value + LogTargetTypeEventType = "EVENT_TYPE" + + // LogTargetTypeDeviceDefender is a LogTargetType enum value + LogTargetTypeDeviceDefender = "DEVICE_DEFENDER" ) // LogTargetType_Values returns all elements of the LogTargetType enum @@ -73191,6 +73290,8 @@ func LogTargetType_Values() []string { LogTargetTypeClientId, LogTargetTypeSourceIp, LogTargetTypePrincipalId, + LogTargetTypeEventType, + LogTargetTypeDeviceDefender, } } diff --git a/service/textract/api.go b/service/textract/api.go index 622d6efae67..11bc373849f 100644 --- a/service/textract/api.go +++ b/service/textract/api.go @@ -1888,11 +1888,12 @@ type AnalyzeDocumentInput struct { // A list of the types of analysis to perform. Add TABLES to the list to return // information about the tables that are detected in the input document. Add // FORMS to return detected form data. Add SIGNATURES to return the locations - // of detected signatures. To perform both forms and table analysis, add TABLES - // and FORMS to FeatureTypes. To detect signatures within form data and table - // data, add SIGNATURES to either TABLES or FORMS. All lines and words detected - // in the document are included in the response (including text that isn't related - // to the value of FeatureTypes). + // of detected signatures. Add LAYOUT to the list to return information about + // the layout of the document. To perform both forms and table analysis, add + // TABLES and FORMS to FeatureTypes. 
To detect signatures within the document + // and within form data and table data, add SIGNATURES to either TABLES or FORMS. + // All lines and words detected in the document are included in the response + // (including text that isn't related to the value of FeatureTypes). // // FeatureTypes is a required field FeatureTypes []*string `type:"list" required:"true" enum:"FeatureType"` @@ -2440,7 +2441,7 @@ type Block struct { // button) or a check box that's detected on a document page. Use the value // of SelectionStatus to determine the status of the selection element. // - // * SIGNATURE - The location and confidene score of a signature detected + // * SIGNATURE - The location and confidence score of a signature detected // on a document page. Can be returned as part of a Key-Value pair or a detected // cell. // @@ -2510,9 +2511,7 @@ type Block struct { // multipage documents that are in PDF or TIFF format. A scanned image (JPEG/PNG) // provided to an asynchronous operation, even if it contains multiple document // pages, is considered a single-page document. This means that for scanned - // images the value of Page is always 1. Synchronous operations will also return - // a Page value of 1 because every input document is considered to be a single-page - // document. + // images the value of Page is always 1. Page *int64 `type:"integer"` // Each query contains the question you want to ask in the Text and the alias @@ -7236,6 +7235,36 @@ const ( // BlockTypeTableFooter is a BlockType enum value BlockTypeTableFooter = "TABLE_FOOTER" + + // BlockTypeLayoutText is a BlockType enum value + BlockTypeLayoutText = "LAYOUT_TEXT" + + // BlockTypeLayoutTitle is a BlockType enum value + BlockTypeLayoutTitle = "LAYOUT_TITLE" + + // BlockTypeLayoutHeader is a BlockType enum value + BlockTypeLayoutHeader = "LAYOUT_HEADER" + + // BlockTypeLayoutFooter is a BlockType enum value + BlockTypeLayoutFooter = "LAYOUT_FOOTER" + + // BlockTypeLayoutSectionHeader is a BlockType enum value + BlockTypeLayoutSectionHeader = "LAYOUT_SECTION_HEADER" + + // BlockTypeLayoutPageNumber is a BlockType enum value + BlockTypeLayoutPageNumber = "LAYOUT_PAGE_NUMBER" + + // BlockTypeLayoutList is a BlockType enum value + BlockTypeLayoutList = "LAYOUT_LIST" + + // BlockTypeLayoutFigure is a BlockType enum value + BlockTypeLayoutFigure = "LAYOUT_FIGURE" + + // BlockTypeLayoutTable is a BlockType enum value + BlockTypeLayoutTable = "LAYOUT_TABLE" + + // BlockTypeLayoutKeyValue is a BlockType enum value + BlockTypeLayoutKeyValue = "LAYOUT_KEY_VALUE" ) // BlockType_Values returns all elements of the BlockType enum @@ -7255,6 +7284,16 @@ func BlockType_Values() []string { BlockTypeSignature, BlockTypeTableTitle, BlockTypeTableFooter, + BlockTypeLayoutText, + BlockTypeLayoutTitle, + BlockTypeLayoutHeader, + BlockTypeLayoutFooter, + BlockTypeLayoutSectionHeader, + BlockTypeLayoutPageNumber, + BlockTypeLayoutList, + BlockTypeLayoutFigure, + BlockTypeLayoutTable, + BlockTypeLayoutKeyValue, } } @@ -7330,6 +7369,9 @@ const ( // FeatureTypeSignatures is a FeatureType enum value FeatureTypeSignatures = "SIGNATURES" + + // FeatureTypeLayout is a FeatureType enum value + FeatureTypeLayout = "LAYOUT" ) // FeatureType_Values returns all elements of the FeatureType enum @@ -7339,6 +7381,7 @@ func FeatureType_Values() []string { FeatureTypeForms, FeatureTypeQueries, FeatureTypeSignatures, + FeatureTypeLayout, } }
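
The new Headers field on KafkaAction and the KafkaActionHeader type introduced in this diff can be exercised directly from the SDK. Below is a minimal, hedged sketch (not part of the diff itself) of building and validating a KafkaAction with headers; the destination ARN, topic, key, and header values are hypothetical placeholders.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/iot"
)

func main() {
	// Hypothetical rule destination ARN, topic, and header values; substitute real ones.
	action := &iot.KafkaAction{
		DestinationArn: aws.String("arn:aws:iot:us-east-1:123456789012:ruledestination/vpc/example-vpc-destination"),
		Topic:          aws.String("device-telemetry"),
		Key:            aws.String("${topic(2)}"),
		Headers: []*iot.KafkaActionHeader{
			{Key: aws.String("deviceType"), Value: aws.String("sensor")},
			{Key: aws.String("firmware"), Value: aws.String("1.0")},
		},
	}

	// Validate enforces the constraints added in this release: Headers, when
	// present, must contain at least one entry, and each entry needs both a
	// Key and a Value.
	if err := action.Validate(); err != nil {
		fmt.Println("invalid KafkaAction:", err)
		return
	}
	fmt.Println(action.String())
}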
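
Likewise, the new LAYOUT feature type and LAYOUT_* block types for Textract can be requested through AnalyzeDocument. A short sketch under assumed inputs: the S3 bucket and object name are placeholders, and default credentials and region configuration are assumed to be available.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/textract"
)

func main() {
	svc := textract.New(session.Must(session.NewSession()))

	// Request layout analysis; bucket and object name are hypothetical placeholders.
	out, err := svc.AnalyzeDocument(&textract.AnalyzeDocumentInput{
		Document: &textract.Document{
			S3Object: &textract.S3Object{
				Bucket: aws.String("example-bucket"),
				Name:   aws.String("scanned-page.png"),
			},
		},
		FeatureTypes: []*string{aws.String(textract.FeatureTypeLayout)},
	})
	if err != nil {
		log.Fatal(err)
	}

	// Report the block type (and text, if present) for the layout blocks
	// added in this release, such as titles and section headers.
	for _, b := range out.Blocks {
		switch aws.StringValue(b.BlockType) {
		case textract.BlockTypeLayoutTitle, textract.BlockTypeLayoutSectionHeader:
			fmt.Println(aws.StringValue(b.BlockType), aws.StringValue(b.Text))
		}
	}
}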