From 2cf8868dd8f0b7cb23b65ad6d42e4cde8a2ff44e Mon Sep 17 00:00:00 2001 From: aws-sdk-python-automation Date: Tue, 17 Oct 2023 18:17:36 +0000 Subject: [PATCH] Update to latest models --- .../api-change-codepipeline-8887.json | 5 + .../api-change-discovery-83838.json | 5 + .../next-release/api-change-ecs-10250.json | 5 + .../api-change-globalaccelerator-69921.json | 5 + .../api-change-guardduty-3358.json | 5 + .../next-release/api-change-kafka-68668.json | 5 + ...i-change-route53recoverycluster-93288.json | 5 + ...ge-route53recoverycontrolconfig-74952.json | 5 + .../2015-07-09/endpoint-rule-set-1.json | 40 +- .../codepipeline/2015-07-09/service-2.json | 9 +- .../2015-11-01/endpoint-rule-set-1.json | 40 +- .../data/discovery/2015-11-01/service-2.json | 4 +- .../ecs/2014-11-13/endpoint-rule-set-1.json | 40 +- botocore/data/ecs/2014-11-13/service-2.json | 10 +- .../2018-08-08/paginators-1.json | 6 + .../2017-11-28/endpoint-rule-set-1.json | 44 +- .../data/guardduty/2017-11-28/service-2.json | 7 +- .../kafka/2018-11-14/endpoint-rule-set-1.json | 44 +- .../data/kafka/2018-11-14/paginators-1.json | 6 + botocore/data/kafka/2018-11-14/service-2.json | 929 ++++++++++++++++++ .../2019-12-02/endpoint-rule-set-1.json | 40 +- .../2019-12-02/service-2.json | 28 +- .../2020-11-02/endpoint-rule-set-1.json | 40 +- .../2020-11-02/service-2.json | 81 ++ 24 files changed, 1244 insertions(+), 164 deletions(-) create mode 100644 .changes/next-release/api-change-codepipeline-8887.json create mode 100644 .changes/next-release/api-change-discovery-83838.json create mode 100644 .changes/next-release/api-change-ecs-10250.json create mode 100644 .changes/next-release/api-change-globalaccelerator-69921.json create mode 100644 .changes/next-release/api-change-guardduty-3358.json create mode 100644 .changes/next-release/api-change-kafka-68668.json create mode 100644 .changes/next-release/api-change-route53recoverycluster-93288.json create mode 100644 .changes/next-release/api-change-route53recoverycontrolconfig-74952.json diff --git a/.changes/next-release/api-change-codepipeline-8887.json b/.changes/next-release/api-change-codepipeline-8887.json new file mode 100644 index 0000000000..91c37dd35a --- /dev/null +++ b/.changes/next-release/api-change-codepipeline-8887.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``codepipeline``", + "description": "Add retryMode ALL_ACTIONS to RetryStageExecution API that retries a failed stage starting from first action in the stage" +} diff --git a/.changes/next-release/api-change-discovery-83838.json b/.changes/next-release/api-change-discovery-83838.json new file mode 100644 index 0000000000..653273217e --- /dev/null +++ b/.changes/next-release/api-change-discovery-83838.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``discovery``", + "description": "This release introduces three new APIs: StartBatchDeleteConfigurationTask, DescribeBatchDeleteConfigurationTask, and BatchDeleteAgents." +} diff --git a/.changes/next-release/api-change-ecs-10250.json b/.changes/next-release/api-change-ecs-10250.json new file mode 100644 index 0000000000..237f101dbf --- /dev/null +++ b/.changes/next-release/api-change-ecs-10250.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``ecs``", + "description": "Documentation only updates to address Amazon ECS tickets." 
+} diff --git a/.changes/next-release/api-change-globalaccelerator-69921.json b/.changes/next-release/api-change-globalaccelerator-69921.json new file mode 100644 index 0000000000..633308e652 --- /dev/null +++ b/.changes/next-release/api-change-globalaccelerator-69921.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``globalaccelerator``", + "description": "Fixed error where ListCustomRoutingEndpointGroups did not have a paginator" +} diff --git a/.changes/next-release/api-change-guardduty-3358.json b/.changes/next-release/api-change-guardduty-3358.json new file mode 100644 index 0000000000..5fe0e24fa3 --- /dev/null +++ b/.changes/next-release/api-change-guardduty-3358.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``guardduty``", + "description": "Add domainWithSuffix finding field to dnsRequestAction" +} diff --git a/.changes/next-release/api-change-kafka-68668.json b/.changes/next-release/api-change-kafka-68668.json new file mode 100644 index 0000000000..e6c68f8484 --- /dev/null +++ b/.changes/next-release/api-change-kafka-68668.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``kafka``", + "description": "AWS Managed Streaming for Kafka is launching MSK Replicator, a new feature that enables customers to reliably replicate data across Amazon MSK clusters in same or different AWS regions. You can now use SDK to create, list, describe, delete, update, and manage tags of MSK Replicators." +} diff --git a/.changes/next-release/api-change-route53recoverycluster-93288.json b/.changes/next-release/api-change-route53recoverycluster-93288.json new file mode 100644 index 0000000000..9d6c902579 --- /dev/null +++ b/.changes/next-release/api-change-route53recoverycluster-93288.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``route53-recovery-cluster``", + "description": "Adds Owner field to ListRoutingControls API." +} diff --git a/.changes/next-release/api-change-route53recoverycontrolconfig-74952.json b/.changes/next-release/api-change-route53recoverycontrolconfig-74952.json new file mode 100644 index 0000000000..fe545deb2a --- /dev/null +++ b/.changes/next-release/api-change-route53recoverycontrolconfig-74952.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``route53-recovery-control-config``", + "description": "Adds permissions for GetResourcePolicy to support returning details about AWS Resource Access Manager resource policies for shared resources." 
+} diff --git a/botocore/data/codepipeline/2015-07-09/endpoint-rule-set-1.json b/botocore/data/codepipeline/2015-07-09/endpoint-rule-set-1.json index 0a96c803ac..b4ab7f1e16 100644 --- a/botocore/data/codepipeline/2015-07-09/endpoint-rule-set-1.json +++ b/botocore/data/codepipeline/2015-07-09/endpoint-rule-set-1.json @@ -40,7 +40,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -83,7 +82,8 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -96,7 +96,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -110,7 +109,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -133,7 +131,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -168,7 +165,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -179,14 +175,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -200,14 +198,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -216,11 +212,11 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -231,14 +227,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -252,7 +250,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -272,7 +269,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -283,14 +279,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "DualStack is enabled but this partition does not support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [], @@ -301,9 +299,11 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], diff --git a/botocore/data/codepipeline/2015-07-09/service-2.json b/botocore/data/codepipeline/2015-07-09/service-2.json index 47c7455521..4f83860925 100644 --- a/botocore/data/codepipeline/2015-07-09/service-2.json +++ b/botocore/data/codepipeline/2015-07-09/service-2.json @@ -513,7 +513,7 @@ {"shape":"StageNotRetryableException"}, {"shape":"NotLatestPipelineExecutionException"} ], - "documentation":"

Resumes the pipeline execution by retrying the last failed actions in a stage. You can retry a stage immediately if any of the actions in the stage fail. When you retry, all actions that are still in progress continue working, and failed actions are triggered again.

" + "documentation":"

You can retry a stage that has failed without having to run a pipeline again from the beginning. You do this by either retrying the failed actions in a stage or by retrying all actions in the stage starting from the first action in the stage. When you retry the failed actions in a stage, all actions that are still in progress continue working, and failed actions are triggered again. When you retry a failed stage from the first action in the stage, the stage cannot have any actions in progress. Before a stage can be retried, all of its actions must have completed: either all actions failed, or some failed and some succeeded.

" }, "StartPipelineExecution":{ "name":"StartPipelineExecution", @@ -3470,7 +3470,7 @@ }, "retryMode":{ "shape":"StageRetryMode", - "documentation":"

The scope of the retry attempt. Currently, the only supported value is FAILED_ACTIONS.

" + "documentation":"

The scope of the retry attempt.

" } }, "documentation":"

Represents the input of a RetryStageExecution action.

" @@ -3680,7 +3680,10 @@ }, "StageRetryMode":{ "type":"string", - "enum":["FAILED_ACTIONS"] + "enum":[ + "FAILED_ACTIONS", + "ALL_ACTIONS" + ] }, "StageState":{ "type":"structure", diff --git a/botocore/data/discovery/2015-11-01/endpoint-rule-set-1.json b/botocore/data/discovery/2015-11-01/endpoint-rule-set-1.json index 825e814d7f..a0af28bdd3 100644 --- a/botocore/data/discovery/2015-11-01/endpoint-rule-set-1.json +++ b/botocore/data/discovery/2015-11-01/endpoint-rule-set-1.json @@ -40,7 +40,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -83,7 +82,8 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -96,7 +96,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -110,7 +109,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -133,7 +131,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -168,7 +165,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -179,14 +175,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -200,14 +198,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -216,11 +212,11 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -231,14 +227,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -252,7 +250,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -272,7 +269,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -283,14 +279,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "DualStack is enabled but this partition does not support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [], @@ -301,9 +299,11 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], diff --git a/botocore/data/discovery/2015-11-01/service-2.json b/botocore/data/discovery/2015-11-01/service-2.json index f09804e848..6df2190adb 100644 --- a/botocore/data/discovery/2015-11-01/service-2.json +++ b/botocore/data/discovery/2015-11-01/service-2.json @@ -750,7 +750,7 @@ "members":{ "message":{"shape":"Message"} }, - "documentation":"

", + "documentation":"

Conflict error.

", "exception":true }, "ContinuousExportDescription":{ @@ -1688,7 +1688,7 @@ "ImportTaskIdentifier":{ "type":"string", "max":200, - "pattern":"\\S*" + "pattern":"^import-task-[a-fA-F0-9]{32}$" }, "ImportTaskList":{ "type":"list", diff --git a/botocore/data/ecs/2014-11-13/endpoint-rule-set-1.json b/botocore/data/ecs/2014-11-13/endpoint-rule-set-1.json index 57a28815f4..463f1cfee2 100644 --- a/botocore/data/ecs/2014-11-13/endpoint-rule-set-1.json +++ b/botocore/data/ecs/2014-11-13/endpoint-rule-set-1.json @@ -40,7 +40,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -83,7 +82,8 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -96,7 +96,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -110,7 +109,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -133,7 +131,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -168,7 +165,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -179,14 +175,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -200,14 +198,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -216,11 +212,11 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -231,14 +227,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -252,7 +250,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -272,7 +269,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -283,14 +279,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "DualStack is enabled but this partition does not support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [], @@ -301,9 +299,11 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], diff --git a/botocore/data/ecs/2014-11-13/service-2.json b/botocore/data/ecs/2014-11-13/service-2.json index 32440d8683..4ab8e6235d 100644 --- a/botocore/data/ecs/2014-11-13/service-2.json +++ b/botocore/data/ecs/2014-11-13/service-2.json @@ -1392,7 +1392,7 @@ "members":{ "namespace":{ "shape":"String", - "documentation":"

The namespace name or full Amazon Resource Name (ARN) of the Cloud Map namespace that's used when you create a service and don't specify a Service Connect configuration. The namespace name can include up to 1024 characters. The name is case-sensitive. The name can't include hyphens (-), tilde (~), greater than (>), less than (<), or slash (/).

If you enter an existing namespace name or ARN, then that namespace will be used. Any namespace type is supported. The namespace must be in this account and this Amazon Web Services Region.

If you enter a new name, a Cloud Map namespace will be created. Amazon ECS creates a Cloud Map namespace with the \"API calls\" method of instance discovery only. This instance discovery method is the \"HTTP\" namespace type in the Command Line Interface. Other types of instance discovery aren't used by Service Connect.

If you update the service with an empty string \"\" for the namespace name, the cluster configuration for Service Connect is removed. Note that the namespace will remain in Cloud Map and must be deleted separately.

For more information about Cloud Map, see Working with Services in the Cloud Map Developer Guide.

" + "documentation":"

The namespace name or full Amazon Resource Name (ARN) of the Cloud Map namespace that's used when you create a service and don't specify a Service Connect configuration. The namespace name can include up to 1024 characters. The name is case-sensitive. The name can't include hyphens (-), tilde (~), greater than (>), less than (<), or slash (/).

If you enter an existing namespace name or ARN, then that namespace will be used. Any namespace type is supported. The namespace must be in this account and this Amazon Web Services Region.

If you enter a new name, a Cloud Map namespace will be created. Amazon ECS creates a Cloud Map namespace with the \"API calls\" method of instance discovery only. This instance discovery method is the \"HTTP\" namespace type in the Command Line Interface. Other types of instance discovery aren't used by Service Connect.

If you update the cluster with an empty string \"\" for the namespace name, the cluster configuration for Service Connect is removed. Note that the namespace will remain in Cloud Map and must be deleted separately.

For more information about Cloud Map, see Working with Services in the Cloud Map Developer Guide.

" } }, "documentation":"

Use this parameter to set a default Service Connect namespace. After you set a default Service Connect namespace, any new services with Service Connect turned on that are created in the cluster are added as client services in the namespace. This setting only applies to new services that set the enabled parameter to true in the ServiceConnectConfiguration. You can set the namespace of each service individually in the ServiceConnectConfiguration to override this default parameter.

Tasks that run in a namespace can use short names to connect to services in the namespace. Tasks can connect to services across all of the clusters in the namespace. Tasks connect through a managed proxy container that collects logs and metrics for increased visibility. Only the tasks that Amazon ECS services create are supported with Service Connect. For more information, see Service Connect in the Amazon Elastic Container Service Developer Guide.

" @@ -2953,7 +2953,7 @@ "documentation":"

The file type to use. The only supported value is s3.

" } }, - "documentation":"

A list of files containing the environment variables to pass to a container. You can specify up to ten environment files. The file must have a .env file extension. Each line in an environment file should contain an environment variable in VARIABLE=VALUE format. Lines beginning with # are treated as comments and are ignored. For more information about the environment variable file syntax, see Declare default environment variables in file.

If there are environment variables specified using the environment parameter in a container definition, they take precedence over the variables contained within an environment file. If multiple environment files are specified that contain the same variable, they're processed from the top down. We recommend that you use unique variable names. For more information, see Specifying environment variables in the Amazon Elastic Container Service Developer Guide.

You must use the following platforms for the Fargate launch type:

" + "documentation":"

A list of files containing the environment variables to pass to a container. You can specify up to ten environment files. The file must have a .env file extension. Each line in an environment file should contain an environment variable in VARIABLE=VALUE format. Lines beginning with # are treated as comments and are ignored.

If there are environment variables specified using the environment parameter in a container definition, they take precedence over the variables contained within an environment file. If multiple environment files are specified that contain the same variable, they're processed from the top down. We recommend that you use unique variable names. For more information, see Specifying environment variables in the Amazon Elastic Container Service Developer Guide.

You must use the following platforms for the Fargate launch type:

Consider the following when using the Fargate launch type:

" }, "EnvironmentFileType":{ "type":"string", @@ -3235,7 +3235,7 @@ "documentation":"

The optional grace period to provide containers time to bootstrap before failed health checks count towards the maximum number of retries. You can specify between 0 and 300 seconds. By default, the startPeriod is off.

If a health check succeeds within the startPeriod, then the container is considered healthy and any subsequent failures count toward the maximum number of retries.

" } }, - "documentation":"

An object representing a container health check. Health check parameters that are specified in a container definition override any Docker health checks that exist in the container image (such as those specified in a parent image or from the image's Dockerfile). This configuration maps to the HEALTHCHECK parameter of docker run.

The Amazon ECS container agent only monitors and reports on the health checks specified in the task definition. Amazon ECS does not monitor Docker health checks that are embedded in a container image and not specified in the container definition. Health check parameters that are specified in a container definition override any Docker health checks that exist in the container image.

You can view the health status of both individual containers and a task with the DescribeTasks API operation or when viewing the task details in the console.

The health check is designed to make sure that your containers survive agent restarts, upgrades, or temporary unavailability.

The following describes the possible healthStatus values for a container:

The following describes the possible healthStatus values for a task. The container health check status of non-essential containers don't have an effect on the health status of a task.

If a task is run manually, and not as part of a service, the task will continue its lifecycle regardless of its health status. For tasks that are part of a service, if the task reports as unhealthy then the task will be stopped and the service scheduler will replace it.

The following are notes about container health check support:

" + "documentation":"

An object representing a container health check. Health check parameters that are specified in a container definition override any Docker health checks that exist in the container image (such as those specified in a parent image or from the image's Dockerfile). This configuration maps to the HEALTHCHECK parameter of docker run.

The Amazon ECS container agent only monitors and reports on the health checks specified in the task definition. Amazon ECS does not monitor Docker health checks that are embedded in a container image and not specified in the container definition. Health check parameters that are specified in a container definition override any Docker health checks that exist in the container image.

You can view the health status of both individual containers and a task with the DescribeTasks API operation or when viewing the task details in the console.

The health check is designed to make sure that your containers survive agent restarts, upgrades, or temporary unavailability.

The following describes the possible healthStatus values for a container:

The following describes the possible healthStatus values for a task, based on the container health check status of the essential containers in the task, listed in priority order (high to low):

Consider the following task health example with 2 containers.

Consider the following task health example with 3 containers.

If a task is run manually, and not as part of a service, the task will continue its lifecycle regardless of its health status. For tasks that are part of a service, if the task reports as unhealthy then the task will be stopped and the service scheduler will replace it.

The following are notes about container health check support:

" }, "HealthStatus":{ "type":"string", @@ -4049,7 +4049,7 @@ }, "containerPortRange":{ "shape":"String", - "documentation":"

The port number range on the container that's bound to the dynamically mapped host port range.

The following rules apply when you specify a containerPortRange:

You can call DescribeTasks to view the hostPortRange which are the host ports that are bound to the container ports.

" + "documentation":"

The port number range on the container that's bound to the dynamically mapped host port range.

The following rules apply when you specify a containerPortRange:

You can call DescribeTasks to view the hostPortRange which are the host ports that are bound to the container ports.

" }, "hostPortRange":{ "shape":"String", @@ -4246,7 +4246,7 @@ }, "containerPortRange":{ "shape":"String", - "documentation":"

The port number range on the container that's bound to the dynamically mapped host port range.

The following rules apply when you specify a containerPortRange:

You can call DescribeTasks to view the hostPortRange which are the host ports that are bound to the container ports.

" + "documentation":"

The port number range on the container that's bound to the dynamically mapped host port range.

The following rules apply when you specify a containerPortRange:

You can call DescribeTasks to view the hostPortRange which are the host ports that are bound to the container ports.

" } }, "documentation":"

Port mappings allow containers to access ports on the host container instance to send or receive traffic. Port mappings are specified as part of the container definition.

If you use containers in a task with the awsvpc or host network mode, specify the exposed ports using containerPort. The hostPort can be left blank or it must be the same value as the containerPort.

Most fields of this parameter (containerPort, hostPort, protocol) maps to PortBindings in the Create a container section of the Docker Remote API and the --publish option to docker run . If the network mode of a task definition is set to host, host ports must either be undefined or match the container port in the port mapping.

You can't expose the same container port for multiple protocols. If you attempt this, an error is returned.

After a task reaches the RUNNING status, manual and automatic host and container port assignments are visible in the networkBindings section of DescribeTasks API responses.

" diff --git a/botocore/data/globalaccelerator/2018-08-08/paginators-1.json b/botocore/data/globalaccelerator/2018-08-08/paginators-1.json index 4bc08a8129..8b72ab2d2b 100644 --- a/botocore/data/globalaccelerator/2018-08-08/paginators-1.json +++ b/botocore/data/globalaccelerator/2018-08-08/paginators-1.json @@ -47,6 +47,12 @@ "output_token": "NextToken", "limit_key": "MaxResults", "result_key": "DestinationPortMappings" + }, + "ListCustomRoutingEndpointGroups": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "EndpointGroups" } } } diff --git a/botocore/data/guardduty/2017-11-28/endpoint-rule-set-1.json b/botocore/data/guardduty/2017-11-28/endpoint-rule-set-1.json index 0742b35878..19c0c9651f 100644 --- a/botocore/data/guardduty/2017-11-28/endpoint-rule-set-1.json +++ b/botocore/data/guardduty/2017-11-28/endpoint-rule-set-1.json @@ -40,7 +40,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -83,7 +82,8 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -96,7 +96,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -110,7 +109,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -133,7 +131,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -168,7 +165,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -179,14 +175,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -200,14 +198,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -216,18 +212,17 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "stringEquals", "argv": [ - "aws-us-gov", { "fn": "getAttr", "argv": [ @@ -236,7 +231,8 @@ }, "name" ] - } + }, + "aws-us-gov" ] } ], @@ -256,14 +252,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -277,7 +275,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -297,7 +294,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -308,14 +304,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "DualStack is enabled but this partition does not support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [], @@ -326,9 +324,11 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], diff --git a/botocore/data/guardduty/2017-11-28/service-2.json b/botocore/data/guardduty/2017-11-28/service-2.json index eaf260f47c..319287e331 100644 --- a/botocore/data/guardduty/2017-11-28/service-2.json +++ b/botocore/data/guardduty/2017-11-28/service-2.json @@ -3116,6 +3116,11 @@ "shape":"Boolean", "documentation":"

Indicates whether the targeted port is blocked.

", "locationName":"blocked" + }, + "DomainWithSuffix":{ + "shape":"String", + "documentation":"

The second- and top-level domain involved in the activity that prompted GuardDuty to generate this finding.

", + "locationName":"domainWithSuffix" } }, "documentation":"

Contains information about the DNS_REQUEST action described in this finding.

" @@ -5010,7 +5015,7 @@ }, "FindingCriteria":{ "shape":"FindingCriteria", - "documentation":"

Represents the criteria used for querying findings. Valid values include:

", + "documentation":"

Represents the criteria used for querying findings. Valid values include:

", "locationName":"findingCriteria" }, "SortCriteria":{ diff --git a/botocore/data/kafka/2018-11-14/endpoint-rule-set-1.json b/botocore/data/kafka/2018-11-14/endpoint-rule-set-1.json index 2ff3e3e03a..45039a0f5b 100644 --- a/botocore/data/kafka/2018-11-14/endpoint-rule-set-1.json +++ b/botocore/data/kafka/2018-11-14/endpoint-rule-set-1.json @@ -40,7 +40,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -83,7 +82,8 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -96,7 +96,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -110,7 +109,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -133,7 +131,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -168,7 +165,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -179,14 +175,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -200,14 +198,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -216,18 +212,17 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "stringEquals", "argv": [ - "aws-us-gov", { "fn": "getAttr", "argv": [ @@ -236,7 +231,8 @@ }, "name" ] - } + }, + "aws-us-gov" ] } ], @@ -256,14 +252,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -277,7 +275,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -297,7 +294,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -308,14 +304,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "DualStack is enabled but this partition does not support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [], @@ -326,9 +324,11 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], diff --git a/botocore/data/kafka/2018-11-14/paginators-1.json b/botocore/data/kafka/2018-11-14/paginators-1.json index 003c335edd..92e6d9fd11 100644 --- a/botocore/data/kafka/2018-11-14/paginators-1.json +++ b/botocore/data/kafka/2018-11-14/paginators-1.json @@ -65,6 +65,12 @@ "output_token": "NextToken", "limit_key": "MaxResults", "result_key": "ClusterOperationInfoList" + }, + "ListReplicators": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Replicators" } } } diff --git a/botocore/data/kafka/2018-11-14/service-2.json b/botocore/data/kafka/2018-11-14/service-2.json index 41b7fe3281..df44659296 100644 --- a/botocore/data/kafka/2018-11-14/service-2.json +++ b/botocore/data/kafka/2018-11-14/service-2.json @@ -186,6 +186,56 @@ ], "documentation": "\n

Creates a new MSK configuration.

\n " }, + "CreateReplicator": { + "name": "CreateReplicator", + "http": { + "method": "POST", + "requestUri": "/replication/v1/replicators", + "responseCode": 200 + }, + "input": { + "shape": "CreateReplicatorRequest" + }, + "output": { + "shape": "CreateReplicatorResponse", + "documentation": "

HTTP Status Code 200: OK.

" + }, + "errors": [ + { + "shape": "BadRequestException", + "documentation": "

HTTP Status Code 400: Bad request due to incorrect input. Correct your request and then retry it.

" + }, + { + "shape": "UnauthorizedException", + "documentation": "

HTTP Status Code 401: Unauthorized request. The provided credentials couldn't be validated.

" + }, + { + "shape": "InternalServerErrorException", + "documentation": "

HTTP Status Code 500: Unexpected internal server error. Retrying your request might resolve the issue.

" + }, + { + "shape": "ForbiddenException", + "documentation": "

HTTP Status Code 403: Access forbidden. Correct your credentials and then retry your request.

" + }, + { + "shape": "NotFoundException", + "documentation": "

HTTP Status Code 404: Resource not found due to incorrect input. Correct your request and then retry it.

" + }, + { + "shape": "ServiceUnavailableException", + "documentation": "

HTTP Status Code 503: Service Unavailable. Retrying your request in some time might resolve the issue.

" + }, + { + "shape": "TooManyRequestsException", + "documentation": "

HTTP Status Code 429: Limit exceeded. Resource limit reached.

" + }, + { + "shape": "ConflictException", + "documentation": "

HTTP Status Code 409: Conflict. This replicator name already exists. Retry your request with another name.

" + } + ], + "documentation": "

Creates the replicator.

" + }, "CreateVpcConnection": { "name": "CreateVpcConnection", "http": { @@ -330,6 +380,51 @@ ], "documentation": "\n

Deletes an MSK Configuration.

\n " }, + "DeleteReplicator": { + "name": "DeleteReplicator", + "http": { + "method": "DELETE", + "requestUri": "/replication/v1/replicators/{replicatorArn}", + "responseCode": 200 + }, + "input": { + "shape": "DeleteReplicatorRequest" + }, + "output": { + "shape": "DeleteReplicatorResponse", + "documentation": "HTTP Status Code 200: OK." + }, + "errors": [ + { + "shape": "BadRequestException", + "documentation": "

HTTP Status Code 400: Bad request due to incorrect input. Correct your request and then retry it.

" + }, { + "shape": "UnauthorizedException", + "documentation": "

HTTP Status Code 401: Unauthorized request. The provided credentials couldn't be validated.

" + }, + { + "shape": "InternalServerErrorException", + "documentation": "

HTTP Status Code 500: Unexpected internal server error. Retrying your request might resolve the issue.

" + }, + { + "shape": "ForbiddenException", + "documentation": "

HTTP Status Code 403: Access forbidden. Correct your credentials and then retry your request.

" + }, + { + "shape": "NotFoundException", + "documentation": "

HTTP Status Code 404: Resource not found due to incorrect input. Correct your request and then retry it.

" + }, + { + "shape": "ServiceUnavailableException", + "documentation": "

HTTP Status Code 503: Service Unavailable. Retrying your request in some time might resolve the issue.

" + }, + { + "shape": "TooManyRequestsException", + "documentation": "

HTTP Status Code 429: Limit exceeded. Resource limit reached.

" + } + ], + "documentation": "

Deletes a replicator.

" + }, "DeleteVpcConnection": { "name": "DeleteVpcConnection", "http": { @@ -600,6 +695,52 @@ ], "documentation": "\n

Returns a description of this revision of the configuration.

\n " }, + "DescribeReplicator": { + "name": "DescribeReplicator", + "http": { + "method": "GET", + "requestUri": "/replication/v1/replicators/{replicatorArn}", + "responseCode": 200 + }, + "input": { + "shape": "DescribeReplicatorRequest" + }, + "output": { + "shape": "DescribeReplicatorResponse", + "documentation": "

HTTP Status Code 200: OK." + }, + "errors": [ + { + "shape": "BadRequestException", + "documentation": "

HTTP Status Code 400: Bad request due to incorrect input. Correct your request and then retry it.

" + }, + { + "shape": "UnauthorizedException", + "documentation": "

HTTP Status Code 401: Unauthorized request. The provided credentials couldn't be validated.

" + }, + { + "shape": "InternalServerErrorException", + "documentation": "

HTTP Status Code 500: Unexpected internal server error. Retrying your request might resolve the issue.

" + }, + { + "shape": "ForbiddenException", + "documentation": "

HTTP Status Code 403: Access forbidden. Correct your credentials and then retry your request.

" + }, + { + "shape": "NotFoundException", + "documentation": "

HTTP Status Code 404: Resource not found due to incorrect input. Correct your request and then retry it.

" + }, + { + "shape": "ServiceUnavailableException", + "documentation": "

HTTP Status Code 503: Service Unavailable. Retrying your request in some time might resolve the issue.

" + }, + { + "shape": "TooManyRequestsException", + "documentation": "

HTTP Status Code 429: Limit exceeded. Resource limit reached.

" + } + ], + "documentation": "

Describes a replicator.

" + }, "DescribeVpcConnection": { "name": "DescribeVpcConnection", "http": { @@ -1086,6 +1227,52 @@ ], "documentation": "\n

Returns a list of the broker nodes in the cluster.

\n " }, + "ListReplicators": { + "name": "ListReplicators", + "http": { + "method": "GET", + "requestUri": "/replication/v1/replicators", + "responseCode": 200 + }, + "input": { + "shape": "ListReplicatorsRequest" + }, + "output": { + "shape": "ListReplicatorsResponse", + "documentation": "HTTP Status Code 200: OK." + }, + "errors": [ + { + "shape": "BadRequestException", + "documentation": "

HTTP Status Code 400: Bad request due to incorrect input. Correct your request and then retry it.

" + }, + { + "shape": "UnauthorizedException", + "documentation": "

HTTP Status Code 401: Unauthorized request. The provided credentials couldn't be validated.

" + }, + { + "shape": "InternalServerErrorException", + "documentation": "

HTTP Status Code 500: Unexpected internal server error. Retrying your request might resolve the issue.

" + }, + { + "shape": "ForbiddenException", + "documentation": "

HTTP Status Code 403: Access forbidden. Correct your credentials and then retry your request.

" + }, + { + "shape": "NotFoundException", + "documentation": "

HTTP Status Code 404: Resource not found due to incorrect input. Correct your request and then retry it.

" + }, + { + "shape": "ServiceUnavailableException", + "documentation": "

HTTP Status Code 503: Service Unavailable. Retrying your request in some time might resolve the issue.

" + }, + { + "shape": "TooManyRequestsException", + "documentation": "

HTTP Status Code 429: Limit exceeded. Resource limit reached.

" + } + ], + "documentation": "

Lists the replicators.

" + }, "ListScramSecrets" : { "name" : "ListScramSecrets", "http" : { @@ -1722,6 +1909,52 @@ } ], "documentation" : "\n

Updates the monitoring settings for the cluster. You can use this operation to specify which Apache Kafka metrics you want Amazon MSK to send to Amazon CloudWatch. You can also specify settings for open monitoring with Prometheus.

\n " }, + "UpdateReplicationInfo": { + "name": "UpdateReplicationInfo", + "http": { + "method": "PUT", + "requestUri": "/replication/v1/replicators/{replicatorArn}/replication-info", + "responseCode": 200 + }, + "input": { + "shape": "UpdateReplicationInfoRequest" + }, + "output": { + "shape": "UpdateReplicationInfoResponse", + "documentation": "HTTP Status Code 200: OK." + }, + "errors": [ + { + "shape": "BadRequestException", + "documentation": "

HTTP Status Code 400: Bad request due to incorrect input. Correct your request and then retry it.

" + }, + { + "shape": "UnauthorizedException", + "documentation": "

HTTP Status Code 401: Unauthorized request. The provided credentials couldn't be validated.

" + }, + { + "shape": "InternalServerErrorException", + "documentation": "

HTTP Status Code 500: Unexpected internal server error. Retrying your request might resolve the issue.

" + }, + { + "shape": "ForbiddenException", + "documentation": "

HTTP Status Code 403: Access forbidden. Correct your credentials and then retry your request.

" + }, + { + "shape": "NotFoundException", + "documentation": "

HTTP Status Code 404: Resource not found due to incorrect input. Correct your request and then retry it.

" + }, + { + "shape": "ServiceUnavailableException", + "documentation": "

HTTP Status Code 503: Service Unavailable. Retrying your request in some time might resolve the issue.

" + }, + { + "shape": "TooManyRequestsException", + "documentation": "

HTTP Status Code 429: Limit exceeded. Resource limit reached.

" + } + ], + "documentation": "

Updates replication info of a replicator.

" + }, "UpdateSecurity" : { "name" : "UpdateSecurity", "http" : { @@ -1799,6 +2032,18 @@ } }, "shapes": { + "AmazonMskCluster": { + "type": "structure", + "members": { + "MskClusterArn": { + "shape": "__string", + "locationName": "mskClusterArn", + "documentation": "

The Amazon Resource Name (ARN) of an Amazon MSK cluster.

" + } + }, + "documentation": "

Details of an Amazon MSK Cluster.

", + "required": ["MskClusterArn"] + }, "BatchAssociateScramSecretRequest" : { "type" : "structure", "members" : { @@ -2763,6 +3008,60 @@ }, "documentation": "\n

Information about the broker access configuration.

\n " }, + "ConsumerGroupReplication": { + "type": "structure", + "members": { + "ConsumerGroupsToExclude": { + "shape": "__listOf__stringMax256", + "locationName": "consumerGroupsToExclude", + "documentation": "

List of regular expression patterns indicating the consumer groups that should not be replicated.

" + }, + "ConsumerGroupsToReplicate": { + "shape": "__listOf__stringMax256", + "locationName": "consumerGroupsToReplicate", + "documentation": "

List of regular expression patterns indicating the consumer groups to copy.

" + }, + "DetectAndCopyNewConsumerGroups": { + "shape": "__boolean", + "locationName": "detectAndCopyNewConsumerGroups", + "documentation": "

Enables synchronization of consumer groups to target cluster.

" + }, + "SynchroniseConsumerGroupOffsets": { + "shape": "__boolean", + "locationName": "synchroniseConsumerGroupOffsets", + "documentation": "

Enables synchronization of consumer group offsets to target cluster. The translated offsets will be written to topic __consumer_offsets.

" + } + }, + "documentation": "

Details about consumer group replication.

", + "required": ["ConsumerGroupsToReplicate"] + }, + "ConsumerGroupReplicationUpdate": { + "type": "structure", + "members": { + "ConsumerGroupsToExclude": { + "shape": "__listOf__stringMax256", + "locationName": "consumerGroupsToExclude", + "documentation": "

List of regular expression patterns indicating the consumer groups that should not be replicated.

" + }, + "ConsumerGroupsToReplicate": { + "shape": "__listOf__stringMax256", + "locationName": "consumerGroupsToReplicate", + "documentation": "

List of regular expression patterns indicating the consumer groups to copy.

" + }, + "DetectAndCopyNewConsumerGroups": { + "shape": "__boolean", + "locationName": "detectAndCopyNewConsumerGroups", + "documentation": "

Enables synchronization of consumer groups to target cluster.

" + }, + "SynchroniseConsumerGroupOffsets": { + "shape": "__boolean", + "locationName": "synchroniseConsumerGroupOffsets", + "documentation": "

Enables synchronization of consumer group offsets to target cluster. The translated offsets will be written to topic __consumer_offsets.

" + } + }, + "documentation": "

Details about consumer group replication.

", + "required": ["ConsumerGroupsToReplicate", "ConsumerGroupsToExclude", "SynchroniseConsumerGroupOffsets", "DetectAndCopyNewConsumerGroups"] + }, "CreateClusterV2Request": { "type": "structure", "members": { @@ -2965,6 +3264,63 @@ } } }, + "CreateReplicatorRequest": { + "type": "structure", + "members": { + "Description": { + "shape": "__stringMax1024", + "locationName": "description", + "documentation": "

A summary description of the replicator.

" + }, + "KafkaClusters": { + "shape": "__listOfKafkaCluster", + "locationName": "kafkaClusters", + "documentation": "

Kafka Clusters to use in setting up sources / targets for replication.

" + }, + "ReplicationInfoList": { + "shape": "__listOfReplicationInfo", + "locationName": "replicationInfoList", + "documentation": "

A list of replication configurations, where each configuration targets a given source-cluster-to-target-cluster replication flow.

" + }, + "ReplicatorName": { + "shape": "__stringMin1Max128Pattern09AZaZ09AZaZ0", + "locationName": "replicatorName", + "documentation": "

The name of the replicator. Alpha-numeric characters with '-' are allowed.

" + }, + "ServiceExecutionRoleArn": { + "shape": "__string", + "locationName": "serviceExecutionRoleArn", + "documentation": "

The ARN of the IAM role used by the replicator to access resources in the customer's account (for example, source and target clusters).

" + }, + "Tags": { + "shape": "__mapOf__string", + "locationName": "tags", + "documentation": "

List of tags to attach to the created Replicator.

" + } + }, + "documentation": "

Creates a replicator using the specified configuration.

", + "required": ["ServiceExecutionRoleArn", "ReplicatorName", "ReplicationInfoList", "KafkaClusters"] + }, + "CreateReplicatorResponse": { + "type": "structure", + "members": { + "ReplicatorArn": { + "shape": "__string", + "locationName": "replicatorArn", + "documentation": "

The Amazon Resource Name (ARN) of the replicator.

" + }, + "ReplicatorName": { + "shape": "__string", + "locationName": "replicatorName", + "documentation": "

Name of the replicator provided by the customer.

" + }, + "ReplicatorState": { + "shape": "ReplicatorState", + "locationName": "replicatorState", + "documentation": "

State of the replicator.

" + } + } + }, "CreateVpcConnectionRequest": { "type": "structure", "members": { @@ -3270,6 +3626,39 @@ } } }, + "DeleteReplicatorRequest": { + "type": "structure", + "members": { + "CurrentVersion": { + "shape": "__string", + "location": "querystring", + "locationName": "currentVersion", + "documentation": "

The current version of the replicator.

" + }, + "ReplicatorArn": { + "shape": "__string", + "location": "uri", + "locationName": "replicatorArn", + "documentation": "

The Amazon Resource Name (ARN) of the replicator to be deleted.

" + } + }, + "required": ["ReplicatorArn"] + }, + "DeleteReplicatorResponse": { + "type": "structure", + "members": { + "ReplicatorArn": { + "shape": "__string", + "locationName": "replicatorArn", + "documentation": "

The Amazon Resource Name (ARN) of the replicator.

" + }, + "ReplicatorState": { + "shape": "ReplicatorState", + "locationName": "replicatorState", + "documentation": "

The state of the replicator.

" + } + } + }, "DeleteVpcConnectionRequest": { "type": "structure", "members": { @@ -3512,6 +3901,88 @@ "Arn" ] }, + "DescribeReplicatorRequest": { + "type": "structure", + "members": { + "ReplicatorArn": { + "shape": "__string", + "location": "uri", + "locationName": "replicatorArn", + "documentation": "

The Amazon Resource Name (ARN) of the replicator to be described.

" + } + }, + "required": ["ReplicatorArn"] + }, + "DescribeReplicatorResponse": { + "type": "structure", + "members": { + "CreationTime": { + "shape": "__timestampIso8601", + "locationName": "creationTime", + "documentation": "

The time when the replicator was created.

" + }, + "CurrentVersion": { + "shape": "__string", + "locationName": "currentVersion", + "documentation": "

The current version number of the replicator.

" + }, + "IsReplicatorReference": { + "shape": "__boolean", + "locationName": "isReplicatorReference", + "documentation": "

Whether this resource is a replicator reference.

" + }, + "KafkaClusters": { + "shape": "__listOfKafkaClusterDescription", + "locationName": "kafkaClusters", + "documentation": "

Kafka Clusters used in setting up sources / targets for replication.

" + }, + "ReplicationInfoList": { + "shape": "__listOfReplicationInfoDescription", + "locationName": "replicationInfoList", + "documentation": "

A list of replication configurations, where each configuration targets a given source-cluster-to-target-cluster replication flow.

" + }, + "ReplicatorArn": { + "shape": "__string", + "locationName": "replicatorArn", + "documentation": "

The Amazon Resource Name (ARN) of the replicator.

" + }, + "ReplicatorDescription": { + "shape": "__string", + "locationName": "replicatorDescription", + "documentation": "

The description of the replicator.

" + }, + "ReplicatorName": { + "shape": "__string", + "locationName": "replicatorName", + "documentation": "

The name of the replicator.

" + }, + "ReplicatorResourceArn": { + "shape": "__string", + "locationName": "replicatorResourceArn", + "documentation": "

The Amazon Resource Name (ARN) of the replicator resource in the region where the replicator was created.

" + }, + "ReplicatorState": { + "shape": "ReplicatorState", + "locationName": "replicatorState", + "documentation": "

State of the replicator.

" + }, + "ServiceExecutionRoleArn": { + "shape": "__string", + "locationName": "serviceExecutionRoleArn", + "documentation": "

The Amazon Resource Name (ARN) of the IAM role used by the replicator to access resources in the customer's account (for example, source and target clusters).

" + }, + "StateInfo": { + "shape": "ReplicationStateInfo", + "locationName": "stateInfo", + "documentation": "

Details about the state of the replicator.

" + }, + "Tags": { + "shape": "__mapOf__string", + "locationName": "tags", + "documentation": "

List of tags attached to the Replicator.

" + } + } + }, "DescribeVpcConnectionResponse": { "type": "structure", "members": { @@ -3876,6 +4347,77 @@ "httpStatusCode": 500 } }, + "KafkaCluster": { + "type": "structure", + "members": { + "AmazonMskCluster": { + "shape": "AmazonMskCluster", + "locationName": "amazonMskCluster", + "documentation": "

Details of an Amazon MSK Cluster.

" + }, + "VpcConfig": { + "shape": "KafkaClusterClientVpcConfig", + "locationName": "vpcConfig", + "documentation": "

Details of an Amazon VPC which has network connectivity to the Apache Kafka cluster.

" + } + }, + "documentation": "

Information about Kafka Cluster to be used as source / target for replication.

", + "required": ["VpcConfig", "AmazonMskCluster"] + }, + "KafkaClusterClientVpcConfig" : { + "type" : "structure", + "members" : { + "SecurityGroupIds" : { + "shape" : "__listOf__string", + "locationName" : "securityGroupIds", + "documentation" : "

The security groups to attach to the ENIs for the broker nodes.

" + }, + "SubnetIds" : { + "shape" : "__listOf__string", + "locationName" : "subnetIds", + "documentation" : "

The list of subnets in the client VPC to connect to.

" + } + }, + "documentation" : "

Details of an Amazon VPC which has network connectivity to the Apache Kafka cluster.

", + "required" : [ "SubnetIds" ] + }, + "KafkaClusterDescription": { + "type": "structure", + "members": { + "AmazonMskCluster": { + "shape": "AmazonMskCluster", + "locationName": "amazonMskCluster", + "documentation": "

Details of an Amazon MSK Cluster.

" + }, + "KafkaClusterAlias": { + "shape": "__string", + "locationName": "kafkaClusterAlias", + "documentation": "

The alias of the Kafka cluster. Used to prefix names of replicated topics.

" + }, + "VpcConfig": { + "shape": "KafkaClusterClientVpcConfig", + "locationName": "vpcConfig", + "documentation": "

Details of an Amazon VPC which has network connectivity to the Apache Kafka cluster.

" + } + }, + "documentation": "

Information about Kafka Cluster used as source / target for replication.

" + }, + "KafkaClusterSummary": { + "type": "structure", + "members": { + "AmazonMskCluster": { + "shape": "AmazonMskCluster", + "locationName": "amazonMskCluster", + "documentation": "

Details of an Amazon MSK Cluster.

" + }, + "KafkaClusterAlias": { + "shape": "__string", + "locationName": "kafkaClusterAlias", + "documentation": "

The alias of the Kafka cluster. Used to prefix names of replicated topics.

" + } + }, + "documentation": "

Summarized information about Kafka Cluster used as source / target for replication.

" + }, "KafkaVersion": { "type": "structure", "members": { @@ -4202,6 +4744,44 @@ } } }, + "ListReplicatorsRequest": { + "type": "structure", + "members": { + "MaxResults": { + "shape": "MaxResults", + "location": "querystring", + "locationName": "maxResults", + "documentation": "

The maximum number of results to return in the response. If there are more results, the response includes a NextToken parameter.

" + }, + "NextToken": { + "shape": "__string", + "location": "querystring", + "locationName": "nextToken", + "documentation": "

If the response of ListReplicators is truncated, it returns a NextToken in the response. This NextToken should be sent in the subsequent request to ListReplicators.

" + }, + "ReplicatorNameFilter": { + "shape": "__string", + "location": "querystring", + "locationName": "replicatorNameFilter", + "documentation": "

Returns replicators starting with the given name.

" + } + } + }, + "ListReplicatorsResponse": { + "type": "structure", + "members": { + "NextToken": { + "shape": "__string", + "locationName": "nextToken", + "documentation": "

If the response of ListReplicators is truncated, it returns a NextToken in the response. This NextToken should be sent in the subsequent request to ListReplicators.

" + }, + "Replicators": { + "shape": "__listOfReplicatorSummary", + "locationName": "replicators", + "documentation": "

List containing information of each of the replicators in the account.

" + } + } + }, "ListScramSecretsRequest" : { "type" : "structure", "members" : { @@ -4807,6 +5387,157 @@ "httpStatusCode": 404 } }, + "ReplicationInfo": { + "type": "structure", + "members": { + "ConsumerGroupReplication": { + "shape": "ConsumerGroupReplication", + "locationName": "consumerGroupReplication", + "documentation": "

Configuration relating to consumer group replication.

" + }, + "SourceKafkaClusterArn": { + "shape": "__string", + "locationName": "sourceKafkaClusterArn", + "documentation": "

The ARN of the source Kafka cluster.

" + }, + "TargetCompressionType": { + "shape": "TargetCompressionType", + "locationName": "targetCompressionType", + "documentation": "

The compression type to use when producing records to target cluster.

" + }, + "TargetKafkaClusterArn": { + "shape": "__string", + "locationName": "targetKafkaClusterArn", + "documentation": "

The ARN of the target Kafka cluster.

" + }, + "TopicReplication": { + "shape": "TopicReplication", + "locationName": "topicReplication", + "documentation": "

Configuration relating to topic replication.

" + } + }, + "documentation": "

Specifies configuration for replication between a source and target Kafka cluster.

", + "required": ["TargetCompressionType", "TopicReplication", "ConsumerGroupReplication", "SourceKafkaClusterArn", "TargetKafkaClusterArn"] + }, + "ReplicationInfoDescription": { + "type": "structure", + "members": { + "ConsumerGroupReplication": { + "shape": "ConsumerGroupReplication", + "locationName": "consumerGroupReplication", + "documentation": "

Configuration relating to consumer group replication.

" + }, + "SourceKafkaClusterAlias": { + "shape": "__string", + "locationName": "sourceKafkaClusterAlias", + "documentation": "

The alias of the source Kafka cluster.

" + }, + "TargetCompressionType": { + "shape": "TargetCompressionType", + "locationName": "targetCompressionType", + "documentation": "

The compression type to use when producing records to the target cluster.

" + }, + "TargetKafkaClusterAlias": { + "shape": "__string", + "locationName": "targetKafkaClusterAlias", + "documentation": "

The alias of the target Kafka cluster.

" + }, + "TopicReplication": { + "shape": "TopicReplication", + "locationName": "topicReplication", + "documentation": "

Configuration relating to topic replication.

" + } + }, + "documentation": "

Specifies configuration for replication between a source and target Kafka cluster (sourceKafkaClusterAlias -> targetKafkaClusterAlias).

" + }, + "ReplicationInfoSummary": { + "type": "structure", + "members": { + "SourceKafkaClusterAlias": { + "shape": "__string", + "locationName": "sourceKafkaClusterAlias", + "documentation": "

The alias of the source Kafka cluster.

" + }, + "TargetKafkaClusterAlias": { + "shape": "__string", + "locationName": "targetKafkaClusterAlias", + "documentation": "

The alias of the target Kafka cluster.

" + } + }, + "documentation": "

Summarized information about replication between clusters.

" + }, + "ReplicationStateInfo" : { + "type" : "structure", + "members" : { + "Code" : { + "shape" : "__string", + "locationName" : "code", + "documentation" : "Code that describes the current state of the replicator." + }, + "Message" : { + "shape" : "__string", + "locationName" : "message", + "documentation" : "Message that describes the state of the replicator." + } + }, + "documentation" : "Details about the state of a replicator" + }, + "ReplicatorState": { + "type": "string", + "documentation": "

The state of a replicator.

", + "enum": ["RUNNING", "CREATING", "UPDATING", "DELETING", "FAILED"] + }, + "ReplicatorSummary": { + "type": "structure", + "members": { + "CreationTime": { + "shape": "__timestampIso8601", + "locationName": "creationTime", + "documentation": "

The time the replicator was created.

" + }, + "CurrentVersion": { + "shape": "__string", + "locationName": "currentVersion", + "documentation": "

The current version of the replicator.

" + }, + "IsReplicatorReference": { + "shape": "__boolean", + "locationName": "isReplicatorReference", + "documentation": "

Whether this resource is a replicator reference.

" + }, + "KafkaClustersSummary": { + "shape": "__listOfKafkaClusterSummary", + "locationName": "kafkaClustersSummary", + "documentation": "

Kafka clusters used in setting up the sources and targets for replication.

" + }, + "ReplicationInfoSummaryList": { + "shape": "__listOfReplicationInfoSummary", + "locationName": "replicationInfoSummaryList", + "documentation": "

A list of summarized replication information between clusters.

" + }, + "ReplicatorArn": { + "shape": "__string", + "locationName": "replicatorArn", + "documentation": "

The Amazon Resource Name (ARN) of the replicator.

" + }, + "ReplicatorName": { + "shape": "__string", + "locationName": "replicatorName", + "documentation": "

The name of the replicator.

" + }, + "ReplicatorResourceArn": { + "shape": "__string", + "locationName": "replicatorResourceArn", + "documentation": "

The Amazon Resource Name (ARN) of the replicator resource in the Region where the replicator was created.

" + }, + "ReplicatorState": { + "shape": "ReplicatorState", + "locationName": "replicatorState", + "documentation": "

State of the replicator.

" + } + }, + "documentation": "

Information about a replicator.

" + }, "ServiceUnavailableException": { "type": "structure", "members": { @@ -4876,6 +5607,11 @@ "Tags" ] }, + "TargetCompressionType": { + "type": "string", + "documentation": "

The type of compression to use when producing records to the target cluster.

", + "enum": ["NONE", "GZIP", "SNAPPY", "LZ4", "ZSTD"] + }, "Tls": { "type": "structure", "members": { @@ -4923,6 +5659,70 @@ "httpStatusCode": 429 } }, + "TopicReplication": { + "type": "structure", + "members": { + "CopyAccessControlListsForTopics": { + "shape": "__boolean", + "locationName": "copyAccessControlListsForTopics", + "documentation": "

Whether to periodically configure remote topic ACLs to match their corresponding upstream topics.

" + }, + "CopyTopicConfigurations": { + "shape": "__boolean", + "locationName": "copyTopicConfigurations", + "documentation": "

Whether to periodically configure remote topics to match their corresponding upstream topics.

" + }, + "DetectAndCopyNewTopics": { + "shape": "__boolean", + "locationName": "detectAndCopyNewTopics", + "documentation": "

Whether to periodically check for new topics and partitions.

" + }, + "TopicsToExclude": { + "shape": "__listOf__stringMax249", + "locationName": "topicsToExclude", + "documentation": "

List of regular expression patterns indicating the topics that should not be replicated.

" + }, + "TopicsToReplicate": { + "shape": "__listOf__stringMax249", + "locationName": "topicsToReplicate", + "documentation": "

List of regular expression patterns indicating the topics to copy.

" + } + }, + "documentation": "

Details about topic replication.
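As a rough illustration of the shape above, here is how a TopicReplication value might look as the Python dict a boto3 request would carry; only TopicsToReplicate is required, and every regex pattern shown is a placeholder rather than a recommended configuration.

# Hedged sketch of a TopicReplication value; patterns are placeholders.
topic_replication = {
    "TopicsToReplicate": ["orders\\..*"],      # only required member of this shape
    "TopicsToExclude": ["orders\\.internal"],  # topics matching these patterns are skipped
    "CopyTopicConfigurations": True,           # keep remote topic configs in sync with upstream
    "CopyAccessControlListsForTopics": True,   # keep remote topic ACLs in sync with upstream
    "DetectAndCopyNewTopics": True,            # periodically pick up new topics and partitions
}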

", + "required": ["TopicsToReplicate"] + }, + "TopicReplicationUpdate": { + "type": "structure", + "members": { + "CopyAccessControlListsForTopics": { + "shape": "__boolean", + "locationName": "copyAccessControlListsForTopics", + "documentation": "

Whether to periodically configure remote topic ACLs to match their corresponding upstream topics.

" + }, + "CopyTopicConfigurations": { + "shape": "__boolean", + "locationName": "copyTopicConfigurations", + "documentation": "

Whether to periodically configure remote topics to match their corresponding upstream topics.

" + }, + "DetectAndCopyNewTopics": { + "shape": "__boolean", + "locationName": "detectAndCopyNewTopics", + "documentation": "

Whether to periodically check for new topics and partitions.

" + }, + "TopicsToExclude": { + "shape": "__listOf__stringMax249", + "locationName": "topicsToExclude", + "documentation": "

List of regular expression patterns indicating the topics that should not be replicated.

" + }, + "TopicsToReplicate": { + "shape": "__listOf__stringMax249", + "locationName": "topicsToReplicate", + "documentation": "

List of regular expression patterns indicating the topics to copy.

" + } + }, + "documentation": "

Details for updating the topic replication of a replicator.

", + "required": ["TopicsToReplicate", "TopicsToExclude", "CopyTopicConfigurations", "DetectAndCopyNewTopics", "CopyAccessControlListsForTopics"] + }, "Unauthenticated" : { "type" : "structure", "members" : { @@ -5248,6 +6048,59 @@ } } }, + "UpdateReplicationInfoRequest": { + "type": "structure", + "members": { + "ConsumerGroupReplication": { + "shape": "ConsumerGroupReplicationUpdate", + "locationName": "consumerGroupReplication", + "documentation": "

Updated consumer group replication information.

" + }, + "CurrentVersion": { + "shape": "__string", + "locationName": "currentVersion", + "documentation": "

Current replicator version.

" + }, + "ReplicatorArn": { + "shape": "__string", + "location": "uri", + "locationName": "replicatorArn", + "documentation": "

The Amazon Resource Name (ARN) of the replicator to be updated.

" + }, + "SourceKafkaClusterArn": { + "shape": "__string", + "locationName": "sourceKafkaClusterArn", + "documentation": "

The ARN of the source Kafka cluster.

" + }, + "TargetKafkaClusterArn": { + "shape": "__string", + "locationName": "targetKafkaClusterArn", + "documentation": "

The ARN of the target Kafka cluster.

" + }, + "TopicReplication": { + "shape": "TopicReplicationUpdate", + "locationName": "topicReplication", + "documentation": "

Updated topic replication information.

" + } + }, + "documentation": "

Update information relating to replication between a given source and target Kafka cluster.
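A hedged sketch of what an UpdateReplicationInfo call could look like through boto3, assuming the standard snake_case mapping of this model; the ARNs and the version token are placeholders, and note that when a TopicReplicationUpdate is supplied, the update shape requires all five of its members.

import boto3

client = boto3.client("kafka")

# All ARNs and the version token below are placeholders.
response = client.update_replication_info(
    ReplicatorArn="arn:aws:kafka:us-east-1:111122223333:replicator/example",
    SourceKafkaClusterArn="arn:aws:kafka:us-east-1:111122223333:cluster/source/example",
    TargetKafkaClusterArn="arn:aws:kafka:us-west-2:111122223333:cluster/target/example",
    CurrentVersion="K1X2Y3EXAMPLE",
    TopicReplication={
        # TopicReplicationUpdate requires every member when it is provided.
        "TopicsToReplicate": ["orders\\..*"],
        "TopicsToExclude": ["orders\\.internal"],
        "CopyTopicConfigurations": True,
        "CopyAccessControlListsForTopics": True,
        "DetectAndCopyNewTopics": True,
    },
)
print(response.get("ReplicatorArn"), response.get("ReplicatorState"))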

", + "required": ["ReplicatorArn", "SourceKafkaClusterArn", "CurrentVersion", "TargetKafkaClusterArn"] + }, + "UpdateReplicationInfoResponse": { + "type": "structure", + "members": { + "ReplicatorArn": { + "shape": "__string", + "locationName": "replicatorArn", + "documentation": "

The Amazon Resource Name (ARN) of the replicator.

" + }, + "ReplicatorState": { + "shape": "ReplicatorState", + "locationName": "replicatorState", + "documentation": "

State of the replicator.

" + } + } + }, "UpdateSecurityRequest" : { "type" : "structure", "members" : { @@ -5636,6 +6489,24 @@ "shape": "KafkaVersion" } }, + "__listOfKafkaCluster": { + "type": "list", + "member": { + "shape": "KafkaCluster" + } + }, + "__listOfKafkaClusterDescription": { + "type": "list", + "member": { + "shape": "KafkaClusterDescription" + } + }, + "__listOfKafkaClusterSummary": { + "type": "list", + "member": { + "shape": "KafkaClusterSummary" + } + }, "__listOfNodeInfo": { "type": "list", "member": { @@ -5648,6 +6519,30 @@ "shape": "ClientVpcConnection" } }, + "__listOfReplicationInfo": { + "type": "list", + "member": { + "shape": "ReplicationInfo" + } + }, + "__listOfReplicationInfoDescription": { + "type": "list", + "member": { + "shape": "ReplicationInfoDescription" + } + }, + "__listOfReplicationInfoSummary": { + "type": "list", + "member": { + "shape": "ReplicationInfoSummary" + } + }, + "__listOfReplicatorSummary": { + "type": "list", + "member": { + "shape": "ReplicatorSummary" + } + }, "__listOfVpcConnection": { "type": "list", "member": { @@ -5678,9 +6573,33 @@ "shape": "__string" } }, + "__listOf__stringMax249": { + "type": "list", + "member": { + "shape": "__stringMax249" + } + }, + "__listOf__stringMax256": { + "type": "list", + "member": { + "shape": "__stringMax256" + } + }, "__string": { "type": "string" }, + "__stringMax1024": { + "type": "string", + "max": 1024 + }, + "__stringMax249": { + "type": "string", + "max": 249 + }, + "__stringMax256": { + "type": "string", + "max": 256 + }, "__stringMin1Max128": { "type": "string", "min": 1, @@ -5696,9 +6615,19 @@ "min": 5, "max": 32 }, + "__stringMin1Max128Pattern09AZaZ09AZaZ0": { + "type": "string", + "min": 1, + "max": 128, + "pattern": "^[0-9A-Za-z][0-9A-Za-z-]{0,}$" + }, "__timestampIso8601": { "type": "timestamp", "timestampFormat": "iso8601" + }, + "__timestampUnix": { + "type": "timestamp", + "timestampFormat": "unixTimestamp" } }, "documentation": "\n

The operations for managing an Amazon MSK cluster.

\n " diff --git a/botocore/data/route53-recovery-cluster/2019-12-02/endpoint-rule-set-1.json b/botocore/data/route53-recovery-cluster/2019-12-02/endpoint-rule-set-1.json index d5b17b6746..9435139fc0 100644 --- a/botocore/data/route53-recovery-cluster/2019-12-02/endpoint-rule-set-1.json +++ b/botocore/data/route53-recovery-cluster/2019-12-02/endpoint-rule-set-1.json @@ -40,7 +40,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -83,7 +82,8 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -96,7 +96,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -110,7 +109,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -133,7 +131,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -168,7 +165,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -179,14 +175,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -200,14 +198,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -216,11 +212,11 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -231,14 +227,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -252,7 +250,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -272,7 +269,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -283,14 +279,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "DualStack is enabled but this partition does not support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [], @@ -301,9 +299,11 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], diff --git a/botocore/data/route53-recovery-cluster/2019-12-02/service-2.json b/botocore/data/route53-recovery-cluster/2019-12-02/service-2.json index d3315b6b38..93803ee4b4 100644 --- a/botocore/data/route53-recovery-cluster/2019-12-02/service-2.json +++ b/botocore/data/route53-recovery-cluster/2019-12-02/service-2.json @@ -29,7 +29,7 @@ {"shape":"ThrottlingException"}, {"shape":"EndpointTemporarilyUnavailableException"} ], - "documentation":"

Get the state for a routing control. A routing control is a simple on/off switch that you can use to route traffic to cells. When a routing control state is On, traffic flows to a cell. When the state is Off, traffic does not flow.

Before you can create a routing control, you must first create a cluster, and then host the control in a control panel on the cluster. For more information, see Create routing control structures in the Amazon Route 53 Application Recovery Controller Developer Guide. You access one of the endpoints for the cluster to get or update the routing control state to redirect traffic for your application.

You must specify Regional endpoints when you work with API cluster operations to get or update routing control states in Route 53 ARC.

To see a code example for getting a routing control state, including accessing Regional cluster endpoints in sequence, see API examples in the Amazon Route 53 Application Recovery Controller Developer Guide.

Learn more about working with routing controls in the following topics in the Amazon Route 53 Application Recovery Controller Developer Guide:

" + "documentation":"

Get the state for a routing control. A routing control is a simple on/off switch that you can use to route traffic to cells. When a routing control state is set to ON, traffic flows to a cell. When the state is set to OFF, traffic does not flow.

Before you can create a routing control, you must first create a cluster, and then host the control in a control panel on the cluster. For more information, see Create routing control structures in the Amazon Route 53 Application Recovery Controller Developer Guide. You access one of the endpoints for the cluster to get or update the routing control state to redirect traffic for your application.

You must specify Regional endpoints when you work with API cluster operations to get or update routing control states in Route 53 ARC.

To see a code example for getting a routing control state, including accessing Regional cluster endpoints in sequence, see API examples in the Amazon Route 53 Application Recovery Controller Developer Guide.

Learn more about working with routing controls in the following topics in the Amazon Route 53 Application Recovery Controller Developer Guide:

" }, "ListRoutingControls":{ "name":"ListRoutingControls", @@ -47,7 +47,7 @@ {"shape":"ThrottlingException"}, {"shape":"EndpointTemporarilyUnavailableException"} ], - "documentation":"

List routing control names and Amazon Resource Names (ARNs), as well as the routing control state for each routing control, along with the control panel name and control panel ARN for the routing controls. If you specify a control panel ARN, this call lists the routing controls in the control panel. Otherwise, it lists all the routing controls in the cluster.

A routing control is a simple on/off switch in Route 53 ARC that you can use to route traffic to cells. When a routing control state is On, traffic flows to a cell. When the state is Off, traffic does not flow.

Before you can create a routing control, you must first create a cluster, and then host the control in a control panel on the cluster. For more information, see Create routing control structures in the Amazon Route 53 Application Recovery Controller Developer Guide. You access one of the endpoints for the cluster to get or update the routing control state to redirect traffic for your application.

You must specify Regional endpoints when you work with API cluster operations to use this API operation to list routing controls in Route 53 ARC.

Learn more about working with routing controls in the following topics in the Amazon Route 53 Application Recovery Controller Developer Guide:

" + "documentation":"

List routing control names and Amazon Resource Names (ARNs), as well as the routing control state for each routing control, along with the control panel name and control panel ARN for the routing controls. If you specify a control panel ARN, this call lists the routing controls in the control panel. Otherwise, it lists all the routing controls in the cluster.

A routing control is a simple on/off switch in Route 53 ARC that you can use to route traffic to cells. When a routing control state is set to ON, traffic flows to a cell. When the state is set to OFF, traffic does not flow.

Before you can create a routing control, you must first create a cluster, and then host the control in a control panel on the cluster. For more information, see Create routing control structures in the Amazon Route 53 Application Recovery Controller Developer Guide. You access one of the endpoints for the cluster to get or update the routing control state to redirect traffic for your application.

You must specify Regional endpoints when you work with API cluster operations to use this API operation to list routing controls in Route 53 ARC.
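To make the Regional endpoint requirement concrete, a minimal boto3 sketch of listing routing controls, including the new Owner field; the endpoint URL is a placeholder for one of the cluster's five endpoints, and the RoutingControls response key is assumed from the shapes in this model.

import boto3

# Hedged sketch: the endpoint URL is a placeholder; the listing is scoped
# to the cluster that the endpoint belongs to.
client = boto3.client(
    "route53-recovery-cluster",
    region_name="us-west-2",
    endpoint_url="https://host-example.us-west-2.example.com",
)

response = client.list_routing_controls()
for control in response.get("RoutingControls", []):
    print(
        control.get("RoutingControlName"),
        control.get("RoutingControlState"),
        control.get("Owner"),  # Amazon Web Services account ID of the owner
    )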

Learn more about working with routing controls in the following topics in the Amazon Route 53 Application Recovery Controller Developer Guide:

" }, "UpdateRoutingControlState":{ "name":"UpdateRoutingControlState", @@ -66,7 +66,7 @@ {"shape":"EndpointTemporarilyUnavailableException"}, {"shape":"ConflictException"} ], - "documentation":"

Set the state of the routing control to reroute traffic. You can set the value to be On or Off. When the state is On, traffic flows to a cell. When the state is Off, traffic does not flow.

With Route 53 ARC, you can add safety rules for routing controls, which are safeguards for routing control state updates that help prevent unexpected outcomes, like fail open traffic routing. However, there are scenarios when you might want to bypass the routing control safeguards that are enforced with safety rules that you've configured. For example, you might want to fail over quickly for disaster recovery, and one or more safety rules might be unexpectedly preventing you from updating a routing control state to reroute traffic. In a \"break glass\" scenario like this, you can override one or more safety rules to change a routing control state and fail over your application.

The SafetyRulesToOverride property enables you override one or more safety rules and update routing control states. For more information, see Override safety rules to reroute traffic in the Amazon Route 53 Application Recovery Controller Developer Guide.

You must specify Regional endpoints when you work with API cluster operations to get or update routing control states in Route 53 ARC.

To see a code example for getting a routing control state, including accessing Regional cluster endpoints in sequence, see API examples in the Amazon Route 53 Application Recovery Controller Developer Guide.

" + "documentation":"

Set the state of the routing control to reroute traffic. You can set the value to ON or OFF. When the state is ON, traffic flows to a cell. When the state is OFF, traffic does not flow.

With Route 53 ARC, you can add safety rules for routing controls, which are safeguards for routing control state updates that help prevent unexpected outcomes, like fail open traffic routing. However, there are scenarios when you might want to bypass the routing control safeguards that are enforced with safety rules that you've configured. For example, you might want to fail over quickly for disaster recovery, and one or more safety rules might be unexpectedly preventing you from updating a routing control state to reroute traffic. In a \"break glass\" scenario like this, you can override one or more safety rules to change a routing control state and fail over your application.

The SafetyRulesToOverride property enables you to override one or more safety rules and update routing control states. For more information, see Override safety rules to reroute traffic in the Amazon Route 53 Application Recovery Controller Developer Guide.

You must specify Regional endpoints when you work with API cluster operations to get or update routing control states in Route 53 ARC.

To see a code example for getting a routing control state, including accessing Regional cluster endpoints in sequence, see API examples in the Amazon Route 53 Application Recovery Controller Developer Guide.
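As a rough companion to that example, a hedged boto3 sketch of the break-glass update described above; the cluster endpoint, the ARNs, and the state value are placeholders, and the state string must be one of the values the RoutingControlState enum actually accepts.

import boto3

# Hedged sketch: endpoint URL and ARNs are placeholders. If this endpoint is
# unavailable, run the same call against another of the cluster's endpoints.
client = boto3.client(
    "route53-recovery-cluster",
    region_name="us-west-2",
    endpoint_url="https://host-example.us-west-2.example.com",
)

client.update_routing_control_state(
    RoutingControlArn="arn:aws:route53-recovery-control::111122223333:controlpanel/abc123/routingcontrol/def456",
    RoutingControlState="Off",  # must be a value from the RoutingControlState enum
    SafetyRulesToOverride=[
        # ARNs of safety rules to bypass in a break-glass scenario (placeholder)
        "arn:aws:route53-recovery-control::111122223333:controlpanel/abc123/safetyrule/xyz789",
    ],
)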

" }, "UpdateRoutingControlStates":{ "name":"UpdateRoutingControlStates", @@ -86,7 +86,7 @@ {"shape":"ConflictException"}, {"shape":"ServiceLimitExceededException"} ], - "documentation":"

Set multiple routing control states. You can set the value for each state to be On or Off. When the state is On, traffic flows to a cell. When it's Off, traffic does not flow.

With Route 53 ARC, you can add safety rules for routing controls, which are safeguards for routing control state updates that help prevent unexpected outcomes, like fail open traffic routing. However, there are scenarios when you might want to bypass the routing control safeguards that are enforced with safety rules that you've configured. For example, you might want to fail over quickly for disaster recovery, and one or more safety rules might be unexpectedly preventing you from updating a routing control state to reroute traffic. In a \"break glass\" scenario like this, you can override one or more safety rules to change a routing control state and fail over your application.

The SafetyRulesToOverride property enables you override one or more safety rules and update routing control states. For more information, see Override safety rules to reroute traffic in the Amazon Route 53 Application Recovery Controller Developer Guide.

You must specify Regional endpoints when you work with API cluster operations to get or update routing control states in Route 53 ARC.

To see a code example for getting a routing control state, including accessing Regional cluster endpoints in sequence, see API examples in the Amazon Route 53 Application Recovery Controller Developer Guide.

" + "documentation":"

Set multiple routing control states. You can set the value for each state to be ON or OFF. When the state is ON, traffic flows to a cell. When it's OFF, traffic does not flow.

With Route 53 ARC, you can add safety rules for routing controls, which are safeguards for routing control state updates that help prevent unexpected outcomes, like fail open traffic routing. However, there are scenarios when you might want to bypass the routing control safeguards that are enforced with safety rules that you've configured. For example, you might want to fail over quickly for disaster recovery, and one or more safety rules might be unexpectedly preventing you from updating a routing control state to reroute traffic. In a \"break glass\" scenario like this, you can override one or more safety rules to change a routing control state and fail over your application.

The SafetyRulesToOverride property enables you to override one or more safety rules and update routing control states. For more information, see Override safety rules to reroute traffic in the Amazon Route 53 Application Recovery Controller Developer Guide.

You must specify Regional endpoints when you work with API cluster operations to get or update routing control states in Route 53 ARC.

To see a code example for getting a routing control state, including accessing Regional cluster endpoints in sequence, see API examples in the Amazon Route 53 Application Recovery Controller Developer Guide.

" } }, "shapes":{ @@ -226,6 +226,12 @@ "type":"integer", "min":1 }, + "Owner":{ + "type":"string", + "max":1024, + "min":12, + "pattern":"^\\S+$" + }, "PageToken":{ "type":"string", "max":8096, @@ -266,7 +272,7 @@ }, "ControlPanelName":{ "shape":"ControlPanelName", - "documentation":"

The name of the control panel where the routing control is located.

" + "documentation":"

The name of the control panel where the routing control is located. Only ASCII characters are supported for control panel names.

" }, "RoutingControlArn":{ "shape":"Arn", @@ -278,10 +284,14 @@ }, "RoutingControlState":{ "shape":"RoutingControlState", - "documentation":"

The current state of the routing control. When a routing control state is On, traffic flows to a cell. When the state is Off, traffic does not flow.

" + "documentation":"

The current state of the routing control. When a routing control state is set to ON, traffic flows to a cell. When the state is set to OFF, traffic does not flow.

" + }, + "Owner":{ + "shape":"Owner", + "documentation":"

The Amazon Web Services account ID of the routing control owner.

" } }, - "documentation":"

A routing control, which is a simple on/off switch that you can use to route traffic to cells. When a routing control state is On, traffic flows to a cell. When the state is Off, traffic does not flow.

" + "documentation":"

A routing control, which is a simple on/off switch that you can use to route traffic to cells. When a routing control state is set to ON, traffic flows to a cell. When the state is set to OFF, traffic does not flow.

" }, "RoutingControlName":{ "type":"string", @@ -375,7 +385,7 @@ }, "RoutingControlState":{ "shape":"RoutingControlState", - "documentation":"

The state of the routing control. You can set the value to be On or Off.

" + "documentation":"

The state of the routing control. You can set the value to ON or OFF.

" }, "SafetyRulesToOverride":{ "shape":"Arns", @@ -452,5 +462,5 @@ ] } }, - "documentation":"

Welcome to the Routing Control (Recovery Cluster) API Reference Guide for Amazon Route 53 Application Recovery Controller.

With Route 53 ARC, you can use routing control with extreme reliability to recover applications by rerouting traffic across Availability Zones or Amazon Web Services Regions. Routing controls are simple on/off switches hosted on a highly available cluster in Route 53 ARC. A cluster provides a set of five redundant Regional endpoints against which you can run API calls to get or update the state of routing controls. To implement failover, you set one routing control On and another one Off, to reroute traffic from one Availability Zone or Amazon Web Services Region to another.

Be aware that you must specify a Regional endpoint for a cluster when you work with API cluster operations to get or update routing control states in Route 53 ARC. In addition, you must specify the US West (Oregon) Region for Route 53 ARC API calls. For example, use the parameter --region us-west-2 with AWS CLI commands. For more information, see Get and update routing control states using the API in the Amazon Route 53 Application Recovery Controller Developer Guide.

This API guide includes information about the API operations for how to get and update routing control states in Route 53 ARC. To work with routing control in Route 53 ARC, you must first create the required components (clusters, control panels, and routing controls) using the recovery cluster configuration API.

For more information about working with routing control in Route 53 ARC, see the following:

" + "documentation":"

Welcome to the Routing Control (Recovery Cluster) API Reference Guide for Amazon Route 53 Application Recovery Controller.

With Route 53 ARC, you can use routing control with extreme reliability to recover applications by rerouting traffic across Availability Zones or Amazon Web Services Regions. Routing controls are simple on/off switches hosted on a highly available cluster in Route 53 ARC. A cluster provides a set of five redundant Regional endpoints against which you can run API calls to get or update the state of routing controls. To implement failover, you set one routing control to ON and another one to OFF, to reroute traffic from one Availability Zone or Amazon Web Services Region to another.

Be aware that you must specify a Regional endpoint for a cluster when you work with API cluster operations to get or update routing control states in Route 53 ARC. In addition, you must specify the US West (Oregon) Region for Route 53 ARC API calls. For example, use the parameter --region us-west-2 with AWS CLI commands. For more information, see Get and update routing control states using the API in the Amazon Route 53 Application Recovery Controller Developer Guide.

This API guide includes information about the API operations for how to get and update routing control states in Route 53 ARC. To work with routing control in Route 53 ARC, you must first create the required components (clusters, control panels, and routing controls) using the recovery cluster configuration API.

For more information about working with routing control in Route 53 ARC, see the following:

" } diff --git a/botocore/data/route53-recovery-control-config/2020-11-02/endpoint-rule-set-1.json b/botocore/data/route53-recovery-control-config/2020-11-02/endpoint-rule-set-1.json index 6d8889a092..1f41aff34b 100644 --- a/botocore/data/route53-recovery-control-config/2020-11-02/endpoint-rule-set-1.json +++ b/botocore/data/route53-recovery-control-config/2020-11-02/endpoint-rule-set-1.json @@ -40,7 +40,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -83,7 +82,8 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -96,7 +96,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -110,7 +109,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -133,7 +131,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -168,7 +165,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -179,14 +175,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -200,14 +198,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -216,11 +212,11 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -231,14 +227,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -252,7 +250,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -272,7 +269,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -283,14 +279,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "DualStack is enabled but this partition does not support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -328,9 +326,11 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], diff --git a/botocore/data/route53-recovery-control-config/2020-11-02/service-2.json b/botocore/data/route53-recovery-control-config/2020-11-02/service-2.json index 49d5321a53..f3d51c1c23 100644 --- a/botocore/data/route53-recovery-control-config/2020-11-02/service-2.json +++ b/botocore/data/route53-recovery-control-config/2020-11-02/service-2.json @@ -483,6 +483,32 @@ ], "documentation": "

Returns information about a safety rule.

" }, + "GetResourcePolicy": { + "name": "GetResourcePolicy", + "http": { + "method": "GET", + "requestUri": "/resourcePolicy/{ResourceArn}", + "responseCode": 200 + }, + "input": { + "shape": "GetResourcePolicyRequest" + }, + "output": { + "shape": "GetResourcePolicyResponse", + "documentation": "

200 response - Success.

" + }, + "errors": [ + { + "shape": "ResourceNotFoundException", + "documentation": "

404 response - MalformedQueryString. The query string contains a syntax error or resource not found.

" + }, + { + "shape": "InternalServerException", + "documentation": "

500 response - InternalServiceError. Temporary service error. Retry the request.

" + } + ], + "documentation": "

Get information about the resource policy for a cluster.
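For illustration, a minimal boto3 sketch of the new GetResourcePolicy operation; the client name follows botocore's mapping of this model, and the cluster ARN and Region are placeholders.

import boto3

# Hedged sketch: fetch the resource policy attached to a shared cluster.
client = boto3.client("route53-recovery-control-config", region_name="us-west-2")

response = client.get_resource_policy(
    ResourceArn="arn:aws:route53-recovery-control::111122223333:cluster/example"  # placeholder
)
print(response.get("Policy"))  # JSON policy document, if one is attached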

" + }, "ListAssociatedRoute53HealthChecks": { "name": "ListAssociatedRoute53HealthChecks", "http": { @@ -918,6 +944,10 @@ "WaitPeriodMs": { "shape": "__integer", "documentation": "

An evaluation period, in milliseconds (ms), during which any request against the target routing controls will fail. This helps prevent \"flapping\" of state. The wait period is 5000 ms by default, but you can choose a custom value.

" + }, + "Owner": { + "shape": "__stringMin12Max12PatternD12", + "documentation": "

The Amazon Web Services account ID of the assertion rule owner.

" } }, "documentation": "

An assertion rule enforces that, when you change a routing control state, the criteria that you set in the rule configuration are met. Otherwise, the change to the routing control is not accepted. For example, the criteria might be that at least one routing control state is On after the transaction so that traffic continues to flow to at least one cell for the application. This ensures that you avoid a fail-open scenario.

", @@ -972,6 +1002,10 @@ "Status": { "shape": "Status", "documentation": "

Deployment status of a resource. Status can be one of the following: PENDING, DEPLOYED, PENDING_DELETION.

" + }, + "Owner": { + "shape": "__stringMin12Max12PatternD12", + "documentation": "

The Amazon Web Services account ID of the cluster owner.

" } }, "documentation": "

A set of five redundant Regional endpoints against which you can execute API calls to update or get the state of routing controls. You can host multiple control panels and routing controls on one cluster.

" @@ -1033,6 +1067,10 @@ "Status": { "shape": "Status", "documentation": "

The deployment status of the control panel. Status can be one of the following: PENDING, DEPLOYED, PENDING_DELETION.

" + }, + "Owner": { + "shape": "__stringMin12Max12PatternD12", + "documentation": "

The Amazon Web Services account ID of the control panel owner.

" } }, "documentation": "

A control panel represents a group of routing controls that can be changed together in a single transaction.

" @@ -1378,6 +1416,10 @@ "WaitPeriodMs": { "shape": "__integer", "documentation": "

An evaluation period, in milliseconds (ms), during which any request against the target routing controls will fail. This helps prevent \"flapping\" of state. The wait period is 5000 ms by default, but you can choose a custom value.

" + }, + "Owner": { + "shape": "__stringMin12Max12PatternD12", + "documentation": "

The Amazon Web Services account ID of the gating rule owner.

" } }, "documentation": "

A gating rule verifies that a gating routing control or set of gating routing controls evaluates as true, based on a rule configuration that you specify, which allows a set of routing control state changes to complete.

For example, if you specify one gating routing control and you set the Type in the rule configuration to OR, that indicates that you must set the gating routing control to On for the rule to evaluate as true; that is, for the gating control \"switch\" to be \"On\". When you do that, you can update the routing control states for the target routing controls that you specify in the gating rule.

", @@ -1415,6 +1457,29 @@ "Name" ] }, + "GetResourcePolicyRequest": { + "type": "structure", + "members": { + "ResourceArn": { + "shape": "__string", + "location": "uri", + "locationName": "ResourceArn", + "documentation": "

The Amazon Resource Name (ARN) of the resource.

" + } + }, + "required": [ + "ResourceArn" + ] + }, + "GetResourcePolicyResponse": { + "type": "structure", + "members": { + "Policy": { + "shape": "__policy", + "documentation": "

The resource policy.

" + } + } + }, "InternalServerException": { "type": "structure", "members": { @@ -1749,6 +1814,10 @@ "Status": { "shape": "Status", "documentation": "

The deployment status of a routing control. Status can be one of the following: PENDING, DEPLOYED, PENDING_DELETION.

" + }, + "Owner": { + "shape": "__stringMin12Max12PatternD12", + "documentation": "

The Amazon Web Services account ID of the routing control owner.

" } }, "documentation": "

A routing control has one of two states: ON and OFF. You can map the routing control state to the state of an Amazon Route 53 health check, which can be used to control traffic routing.

" @@ -2072,6 +2141,12 @@ "max": 256, "pattern": "^\\S+$" }, + "__stringMin12Max12PatternD12": { + "type": "string", + "min": 12, + "max": 12, + "pattern": "^\\d{12}$" + }, "__stringMin1Max128PatternAZaZ09": { "type": "string", "min": 1, @@ -2102,6 +2177,12 @@ "max": 8096, "pattern": "[\\S]*" }, + "__policy": { + "type": "string", + "min": 2, + "max": 10240, + "pattern": "[\\u0009\\u000A\\u000D\\u0020-\\u007E\\u00A1-\\u00FF]+" + }, "__timestampIso8601": { "type": "timestamp", "timestampFormat": "iso8601"