diff --git a/CHANGELOG.md b/CHANGELOG.md index d0b1a933464..eae55b14f18 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,14 @@ +Release v1.38.71 (2021-07-01) +=== + +### Service Client Updates +* `service/ec2`: Updates service API and documentation + * Adding a new reserved field to support future infrastructure improvements for Amazon EC2 Fleet. +* `service/sagemaker`: Updates service API and documentation + * SageMaker model registry now supports up to 5 containers and associated environment variables. +* `service/sqs`: Updates service documentation + * Documentation updates for Amazon SQS. + Release v1.38.70 (2021-06-30) === diff --git a/aws/endpoints/defaults.go b/aws/endpoints/defaults.go index 887c21dcb1c..27797a72e98 100644 --- a/aws/endpoints/defaults.go +++ b/aws/endpoints/defaults.go @@ -982,6 +982,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -3047,6 +3048,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -6737,6 +6739,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, diff --git a/aws/version.go b/aws/version.go index d1d8732c68d..95a41a03b2b 100644 --- a/aws/version.go +++ b/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.38.70" +const SDKVersion = "1.38.71" diff --git a/models/apis/ec2/2016-11-15/api-2.json b/models/apis/ec2/2016-11-15/api-2.json index 2b801505816..e853f3a442f 100755 --- 
a/models/apis/ec2/2016-11-15/api-2.json +++ b/models/apis/ec2/2016-11-15/api-2.json @@ -7763,7 +7763,8 @@ "TagSpecifications":{ "shape":"TagSpecificationList", "locationName":"TagSpecification" - } + }, + "Context":{"shape":"String"} } }, "CreateFleetResult":{ @@ -16819,6 +16820,10 @@ "Instances":{ "shape":"DescribeFleetsInstancesSet", "locationName":"fleetInstanceSet" + }, + "Context":{ + "shape":"String", + "locationName":"context" } } }, @@ -22830,7 +22835,8 @@ "locationName":"LaunchTemplateConfig" }, "FleetId":{"shape":"FleetId"}, - "TargetCapacitySpecification":{"shape":"TargetCapacitySpecificationRequest"} + "TargetCapacitySpecification":{"shape":"TargetCapacitySpecificationRequest"}, + "Context":{"shape":"String"} } }, "ModifyFleetResult":{ @@ -23312,7 +23318,8 @@ "shape":"Integer", "locationName":"targetCapacity" }, - "OnDemandTargetCapacity":{"shape":"Integer"} + "OnDemandTargetCapacity":{"shape":"Integer"}, + "Context":{"shape":"String"} } }, "ModifySpotFleetRequestResponse":{ @@ -27580,6 +27587,7 @@ "network-acl", "network-interface", "network-insights-analysis", + "network-insights-boundary", "network-insights-path", "placement-group", "reserved-instances", @@ -29516,6 +29524,10 @@ "shape":"Integer", "locationName":"instancePoolsToUseCount" }, + "Context":{ + "shape":"String", + "locationName":"context" + }, "TagSpecifications":{ "shape":"TagSpecificationList", "locationName":"TagSpecification" diff --git a/models/apis/ec2/2016-11-15/docs-2.json b/models/apis/ec2/2016-11-15/docs-2.json index 4b65a74db46..4b2d62a6e55 100755 --- a/models/apis/ec2/2016-11-15/docs-2.json +++ b/models/apis/ec2/2016-11-15/docs-2.json @@ -82,7 +82,7 @@ "CreateSecurityGroup": "

Creates a security group.

A security group acts as a virtual firewall for your instance to control inbound and outbound traffic. For more information, see Amazon EC2 Security Groups in the Amazon Elastic Compute Cloud User Guide and Security Groups for Your VPC in the Amazon Virtual Private Cloud User Guide.

When you create a security group, you specify a friendly name of your choice. You can have a security group for use in EC2-Classic with the same name as a security group for use in a VPC. However, you can't have two security groups for use in EC2-Classic with the same name or two security groups for use in a VPC with the same name.

You have a default security group for use in EC2-Classic and a default security group for use in your VPC. If you don't specify a security group when you launch an instance, the instance is launched into the appropriate default security group. A default security group includes a default rule that grants instances unrestricted network access to each other.

You can add or remove rules from your security groups using AuthorizeSecurityGroupIngress, AuthorizeSecurityGroupEgress, RevokeSecurityGroupIngress, and RevokeSecurityGroupEgress.

For more information about VPC security group limits, see Amazon VPC Limits.

", "CreateSnapshot": "

Creates a snapshot of an EBS volume and stores it in Amazon S3. You can use snapshots for backups, to make copies of EBS volumes, and to save data before shutting down an instance.

You can create snapshots of volumes in a Region and volumes on an Outpost. If you create a snapshot of a volume in a Region, the snapshot must be stored in the same Region as the volume. If you create a snapshot of a volume on an Outpost, the snapshot can be stored on the same Outpost as the volume, or in the Region for that Outpost.

When a snapshot is created, any AWS Marketplace product codes that are associated with the source volume are propagated to the snapshot.

You can take a snapshot of an attached volume that is in use. However, snapshots only capture data that has been written to your EBS volume at the time the snapshot command is issued; this might exclude any data that has been cached by any applications or the operating system. If you can pause any file systems on the volume long enough to take a snapshot, your snapshot should be complete. However, if you cannot pause all file writes to the volume, you should unmount the volume from within the instance, issue the snapshot command, and then remount the volume to ensure a consistent and complete snapshot. You may remount and use your volume while the snapshot status is pending.

To create a snapshot for EBS volumes that serve as root devices, you should stop the instance before taking the snapshot.

Snapshots that are taken from encrypted volumes are automatically encrypted. Volumes that are created from encrypted snapshots are also automatically encrypted. Your encrypted volumes and any associated snapshots always remain protected.

You can tag your snapshots during creation. For more information, see Tagging your Amazon EC2 resources in the Amazon Elastic Compute Cloud User Guide.

For more information, see Amazon Elastic Block Store and Amazon EBS encryption in the Amazon Elastic Compute Cloud User Guide.

", "CreateSnapshots": "

Creates crash-consistent snapshots of multiple EBS volumes and stores the data in S3. Volumes are chosen by specifying an instance. Any attached volumes will produce one snapshot each that is crash-consistent across the instance. Boot volumes can be excluded by changing the parameters.

You can create multi-volume snapshots of instances in a Region and instances on an Outpost. If you create snapshots from an instance in a Region, the snapshots must be stored in the same Region as the instance. If you create snapshots from an instance on an Outpost, the snapshots can be stored on the same Outpost as the instance, or in the Region for that Outpost.

", - "CreateSpotDatafeedSubscription": "

Creates a data feed for Spot Instances, enabling you to view Spot Instance usage logs. You can create one data feed per AWS account. For more information, see Spot Instance data feed in the Amazon EC2 User Guide for Linux Instances.

", + "CreateSpotDatafeedSubscription": "

Creates a data feed for Spot Instances, enabling you to view Spot Instance usage logs. You can create one data feed per account. For more information, see Spot Instance data feed in the Amazon EC2 User Guide for Linux Instances.

", "CreateStoreImageTask": "

Stores an AMI as a single object in an S3 bucket.

To use this API, you must have the required permissions. For more information, see Permissions for storing and restoring AMIs using S3 in the Amazon Elastic Compute Cloud User Guide.

For more information, see Store and restore an AMI using S3 in the Amazon Elastic Compute Cloud User Guide.

", "CreateSubnet": "

Creates a subnet in a specified VPC.

You must specify an IPv4 CIDR block for the subnet. After you create a subnet, you can't change its CIDR block. The allowed block size is between a /16 netmask (65,536 IP addresses) and /28 netmask (16 IP addresses). The CIDR block must not overlap with the CIDR block of an existing subnet in the VPC.

If you've associated an IPv6 CIDR block with your VPC, you can create a subnet with an IPv6 CIDR block that uses a /64 prefix length.

AWS reserves both the first four and the last IPv4 address in each subnet's CIDR block. They're not available for use.

If you add more than one subnet to a VPC, they're set up in a star topology with a logical router in the middle.

When you stop an instance in a subnet, it retains its private IPv4 address. It's therefore possible to have a subnet with no running instances (they're all stopped), but no remaining IP addresses available.

For more information about subnets, see Your VPC and Subnets in the Amazon Virtual Private Cloud User Guide.

", "CreateTags": "

Adds or overwrites only the specified tags for the specified Amazon EC2 resource or resources. When you specify an existing tag key, the value is overwritten with the new value. Each resource can have a maximum of 50 tags. Each tag consists of a key and optional value. Tag keys must be unique per resource.

For more information about tags, see Tagging Your Resources in the Amazon Elastic Compute Cloud User Guide. For more information about creating IAM policies that control users' access to resources based on tags, see Supported Resource-Level Permissions for Amazon EC2 API Actions in the Amazon Elastic Compute Cloud User Guide.

", @@ -174,7 +174,7 @@ "DescribeAvailabilityZones": "

Describes the Availability Zones, Local Zones, and Wavelength Zones that are available to you. If there is an event impacting a zone, you can use this request to view the state and any provided messages for that zone.

For more information about Availability Zones, Local Zones, and Wavelength Zones, see Regions, Zones and Outposts in the Amazon Elastic Compute Cloud User Guide.

", "DescribeBundleTasks": "

Describes the specified bundle tasks or all of your bundle tasks.

Completed bundle tasks are listed for only a limited time. If your bundle task is no longer in the list, you can still register an AMI from it. Just use RegisterImage with the Amazon S3 bucket name and image manifest name you provided to the bundle task.

", "DescribeByoipCidrs": "

Describes the IP address ranges that were specified in calls to ProvisionByoipCidr.

To describe the address pools that were created when you provisioned the address ranges, use DescribePublicIpv4Pools or DescribeIpv6Pools.

", - "DescribeCapacityReservations": "

Describes one or more of your Capacity Reservations. The results describe only the Capacity Reservations in the AWS Region that you're currently using.

", + "DescribeCapacityReservations": "

Describes one or more of your Capacity Reservations. The results describe only the Capacity Reservations in the Region that you're currently using.

", "DescribeCarrierGateways": "

Describes one or more of your carrier gateways.

", "DescribeClassicLinkInstances": "

Describes one or more of your linked EC2-Classic instances. This request only returns information about EC2-Classic instances linked to a VPC through ClassicLink. You cannot use this request to return information about other instances.

", "DescribeClientVpnAuthorizationRules": "

Describes the authorization rules for a specified Client VPN endpoint.

", @@ -328,7 +328,7 @@ "ExportTransitGatewayRoutes": "

Exports routes from the specified transit gateway route table to the specified S3 bucket. By default, all routes are exported. Alternatively, you can filter by CIDR range.

The routes are saved to the specified bucket in a JSON file. For more information, see Export Route Tables to Amazon S3 in Transit Gateways.

", "GetAssociatedEnclaveCertificateIamRoles": "

Returns the IAM roles that are associated with the specified AWS Certificate Manager (ACM) certificate. It also returns the name of the Amazon S3 bucket and the Amazon S3 object key where the certificate, certificate chain, and encrypted private key bundle are stored, and the ARN of the AWS Key Management Service (KMS) customer master key (CMK) that's used to encrypt the private key.

", "GetAssociatedIpv6PoolCidrs": "

Gets information about the IPv6 CIDR block associations for a specified IPv6 address pool.

", - "GetCapacityReservationUsage": "

Gets usage information about a Capacity Reservation. If the Capacity Reservation is shared, it shows usage information for the Capacity Reservation owner and each AWS account that is currently using the shared capacity. If the Capacity Reservation is not shared, it shows only the Capacity Reservation owner's usage.

", + "GetCapacityReservationUsage": "

Gets usage information about a Capacity Reservation. If the Capacity Reservation is shared, it shows usage information for the Capacity Reservation owner and each account that is currently using the shared capacity. If the Capacity Reservation is not shared, it shows only the Capacity Reservation owner's usage.

", "GetCoipPoolUsage": "

Describes the allocations from the specified customer-owned address pool.

", "GetConsoleOutput": "

Gets the console output for the specified instance. For Linux instances, the instance console output displays the exact console output that would normally be displayed on a physical monitor attached to a computer. For Windows instances, the instance console output includes the last three system event log errors.

By default, the console output returns buffered information that was posted shortly after an instance transition state (start, stop, reboot, or terminate). This information is available for at least one hour after the most recent post. Only the most recent 64 KB of console output is available.

You can optionally retrieve the latest serial console output at any time during the instance lifecycle. This option is supported on instance types that use the Nitro hypervisor.

For more information, see Instance console output in the Amazon EC2 User Guide.

", "GetConsoleScreenshot": "

Retrieve a JPG-format screenshot of a running instance to help with troubleshooting.

The returned content is Base64-encoded.

", @@ -359,7 +359,7 @@ "ModifyAvailabilityZoneGroup": "

Changes the opt-in status of the Local Zone and Wavelength Zone group for your account.

Use DescribeAvailabilityZones to view the value for GroupName.

", "ModifyCapacityReservation": "

Modifies a Capacity Reservation's capacity and the conditions under which it is to be released. You cannot change a Capacity Reservation's instance type, EBS optimization, instance store settings, platform, Availability Zone, or instance eligibility. If you need to modify any of these attributes, we recommend that you cancel the Capacity Reservation, and then create a new one with the required attributes.

", "ModifyClientVpnEndpoint": "

Modifies the specified Client VPN endpoint. Modifying the DNS server resets existing client connections.

", - "ModifyDefaultCreditSpecification": "

Modifies the default credit option for CPU usage of burstable performance instances. The default credit option is set at the account level per AWS Region, and is specified per instance family. All new burstable performance instances in the account launch using the default credit option.

ModifyDefaultCreditSpecification is an asynchronous operation, which works at an AWS Region level and modifies the credit option for each Availability Zone. All zones in a Region are updated within five minutes. But if instances are launched during this operation, they might not get the new credit option until the zone is updated. To verify whether the update has occurred, you can call GetDefaultCreditSpecification and check DefaultCreditSpecification for updates.

For more information, see Burstable performance instances in the Amazon EC2 User Guide.

", + "ModifyDefaultCreditSpecification": "

Modifies the default credit option for CPU usage of burstable performance instances. The default credit option is set at the account level per Region, and is specified per instance family. All new burstable performance instances in the account launch using the default credit option.

ModifyDefaultCreditSpecification is an asynchronous operation, which works at a Region level and modifies the credit option for each Availability Zone. All zones in a Region are updated within five minutes. But if instances are launched during this operation, they might not get the new credit option until the zone is updated. To verify whether the update has occurred, you can call GetDefaultCreditSpecification and check DefaultCreditSpecification for updates.

For more information, see Burstable performance instances in the Amazon EC2 User Guide.

", "ModifyEbsDefaultKmsKeyId": "

Changes the default customer master key (CMK) for EBS encryption by default for your account in this Region.

AWS creates a unique AWS managed CMK in each Region for use with encryption by default. If you change the default CMK to a symmetric customer managed CMK, it is used instead of the AWS managed CMK. To reset the default CMK to the AWS managed CMK for EBS, use ResetEbsDefaultKmsKeyId. Amazon EBS does not support asymmetric CMKs.

If you delete or disable the customer managed CMK that you specified for use with encryption by default, your instances will fail to launch.

For more information, see Amazon EBS encryption in the Amazon Elastic Compute Cloud User Guide.

", "ModifyFleet": "

Modifies the specified EC2 Fleet.

You can only modify an EC2 Fleet request of type maintain.

While the EC2 Fleet is being modified, it is in the modifying state.

To scale up your EC2 Fleet, increase its target capacity. The EC2 Fleet launches the additional Spot Instances according to the allocation strategy for the EC2 Fleet request. If the allocation strategy is lowest-price, the EC2 Fleet launches instances using the Spot Instance pool with the lowest price. If the allocation strategy is diversified, the EC2 Fleet distributes the instances across the Spot Instance pools. If the allocation strategy is capacity-optimized, EC2 Fleet launches instances from Spot Instance pools with optimal capacity for the number of instances that are launching.

To scale down your EC2 Fleet, decrease its target capacity. First, the EC2 Fleet cancels any open requests that exceed the new target capacity. You can request that the EC2 Fleet terminate Spot Instances until the size of the fleet no longer exceeds the new target capacity. If the allocation strategy is lowest-price, the EC2 Fleet terminates the instances with the highest price per unit. If the allocation strategy is capacity-optimized, the EC2 Fleet terminates the instances in the Spot Instance pools that have the least available Spot Instance capacity. If the allocation strategy is diversified, the EC2 Fleet terminates instances across the Spot Instance pools. Alternatively, you can request that the EC2 Fleet keep the fleet at its current size, but not replace any Spot Instances that are interrupted or that you terminate manually.

If you are finished with your EC2 Fleet for now, but will use it again later, you can set the target capacity to 0.

", "ModifyFpgaImageAttribute": "

Modifies the specified attribute of the specified Amazon FPGA Image (AFI).

", @@ -444,12 +444,12 @@ "SearchTransitGatewayMulticastGroups": "

Searches one or more transit gateway multicast groups and returns the group membership information.

", "SearchTransitGatewayRoutes": "

Searches for routes in the specified transit gateway route table.

", "SendDiagnosticInterrupt": "

Sends a diagnostic interrupt to the specified Amazon EC2 instance to trigger a kernel panic (on Linux instances), or a blue screen/stop error (on Windows instances). For instances based on Intel and AMD processors, the interrupt is received as a non-maskable interrupt (NMI).

In general, the operating system crashes and reboots when a kernel panic or stop error is triggered. The operating system can also be configured to perform diagnostic tasks, such as generating a memory dump file, loading a secondary kernel, or obtaining a call trace.

Before sending a diagnostic interrupt to your instance, ensure that its operating system is configured to perform the required diagnostic tasks.

For more information about configuring your operating system to generate a crash dump when a kernel panic or stop error occurs, see Send a diagnostic interrupt (Linux instances) or Send a Diagnostic Interrupt (Windows instances).

", - "StartInstances": "

Starts an Amazon EBS-backed instance that you've previously stopped.

Instances that use Amazon EBS volumes as their root devices can be quickly stopped and started. When an instance is stopped, the compute resources are released and you are not billed for instance usage. However, your root partition Amazon EBS volume remains and continues to persist your data, and you are charged for Amazon EBS volume usage. You can restart your instance at any time. Every time you start your Windows instance, Amazon EC2 charges you for a full instance hour. If you stop and restart your Windows instance, a new instance hour begins and Amazon EC2 charges you for another full instance hour even if you are still within the same 60-minute period when it was stopped. Every time you start your Linux instance, Amazon EC2 charges a one-minute minimum for instance usage, and thereafter charges per second for instance usage.

Before stopping an instance, make sure it is in a state from which it can be restarted. Stopping an instance does not preserve data stored in RAM.

Performing this operation on an instance that uses an instance store as its root device returns an error.

For more information, see Stopping instances in the Amazon EC2 User Guide.

", + "StartInstances": "

Starts an Amazon EBS-backed instance that you've previously stopped.

Instances that use Amazon EBS volumes as their root devices can be quickly stopped and started. When an instance is stopped, the compute resources are released and you are not billed for instance usage. However, your root partition Amazon EBS volume remains and continues to persist your data, and you are charged for Amazon EBS volume usage. You can restart your instance at any time. Every time you start your instance, Amazon EC2 charges a one-minute minimum for instance usage, and thereafter charges per second for instance usage.

Before stopping an instance, make sure it is in a state from which it can be restarted. Stopping an instance does not preserve data stored in RAM.

Performing this operation on an instance that uses an instance store as its root device returns an error.

For more information, see Stopping instances in the Amazon EC2 User Guide.

", "StartNetworkInsightsAnalysis": "

Starts analyzing the specified path. If the path is reachable, the operation returns the shortest feasible path.

", "StartVpcEndpointServicePrivateDnsVerification": "

Initiates the verification process to prove that the service provider owns the private DNS name domain for the endpoint service.

The service provider must successfully perform the verification before the consumer can use the name to access the service.

Before the service provider runs this command, they must add a record to the DNS server. For more information, see Adding a TXT Record to Your Domain's DNS Server in the Amazon VPC User Guide.

", - "StopInstances": "

Stops an Amazon EBS-backed instance.

You can use the Stop action to hibernate an instance if the instance is enabled for hibernation and it meets the hibernation prerequisites. For more information, see Hibernate your instance in the Amazon EC2 User Guide.

We don't charge usage for a stopped instance, or data transfer fees; however, your root partition Amazon EBS volume remains and continues to persist your data, and you are charged for Amazon EBS volume usage. Every time you start your Windows instance, Amazon EC2 charges you for a full instance hour. If you stop and restart your Windows instance, a new instance hour begins and Amazon EC2 charges you for another full instance hour even if you are still within the same 60-minute period when it was stopped. Every time you start your Linux instance, Amazon EC2 charges a one-minute minimum for instance usage, and thereafter charges per second for instance usage.

You can't stop or hibernate instance store-backed instances. You can't use the Stop action to hibernate Spot Instances, but you can specify that Amazon EC2 should hibernate Spot Instances when they are interrupted. For more information, see Hibernating interrupted Spot Instances in the Amazon EC2 User Guide.

When you stop or hibernate an instance, we shut it down. You can restart your instance at any time. Before stopping or hibernating an instance, make sure it is in a state from which it can be restarted. Stopping an instance does not preserve data stored in RAM, but hibernating an instance does preserve data stored in RAM. If an instance cannot hibernate successfully, a normal shutdown occurs.

Stopping and hibernating an instance is different to rebooting or terminating it. For example, when you stop or hibernate an instance, the root device and any other devices attached to the instance persist. When you terminate an instance, the root device and any other devices attached during the instance launch are automatically deleted. For more information about the differences between rebooting, stopping, hibernating, and terminating instances, see Instance lifecycle in the Amazon EC2 User Guide.

When you stop an instance, we attempt to shut it down forcibly after a short while. If your instance appears stuck in the stopping state after a period of time, there may be an issue with the underlying host computer. For more information, see Troubleshooting stopping your instance in the Amazon EC2 User Guide.

", + "StopInstances": "

Stops an Amazon EBS-backed instance.

You can use the Stop action to hibernate an instance if the instance is enabled for hibernation and it meets the hibernation prerequisites. For more information, see Hibernate your instance in the Amazon EC2 User Guide.

We don't charge usage for a stopped instance, or data transfer fees; however, your root partition Amazon EBS volume remains and continues to persist your data, and you are charged for Amazon EBS volume usage. Every time you start your instance, Amazon EC2 charges a one-minute minimum for instance usage, and thereafter charges per second for instance usage.

You can't stop or hibernate instance store-backed instances. You can't use the Stop action to hibernate Spot Instances, but you can specify that Amazon EC2 should hibernate Spot Instances when they are interrupted. For more information, see Hibernating interrupted Spot Instances in the Amazon EC2 User Guide.

When you stop or hibernate an instance, we shut it down. You can restart your instance at any time. Before stopping or hibernating an instance, make sure it is in a state from which it can be restarted. Stopping an instance does not preserve data stored in RAM, but hibernating an instance does preserve data stored in RAM. If an instance cannot hibernate successfully, a normal shutdown occurs.

Stopping and hibernating an instance is different to rebooting or terminating it. For example, when you stop or hibernate an instance, the root device and any other devices attached to the instance persist. When you terminate an instance, the root device and any other devices attached during the instance launch are automatically deleted. For more information about the differences between rebooting, stopping, hibernating, and terminating instances, see Instance lifecycle in the Amazon EC2 User Guide.

When you stop an instance, we attempt to shut it down forcibly after a short while. If your instance appears stuck in the stopping state after a period of time, there may be an issue with the underlying host computer. For more information, see Troubleshooting stopping your instance in the Amazon EC2 User Guide.

", "TerminateClientVpnConnections": "

Terminates active Client VPN endpoint connections. This action can be used to terminate a specific client connection, or up to five connections established by a specific user.

", - "TerminateInstances": "

Shuts down the specified instances. This operation is idempotent; if you terminate an instance more than once, each call succeeds.

If you specify multiple instances and the request fails (for example, because of a single incorrect instance ID), none of the instances are terminated.

Terminated instances remain visible after termination (for approximately one hour).

By default, Amazon EC2 deletes all EBS volumes that were attached when the instance launched. Volumes attached after instance launch continue running.

You can stop, start, and terminate EBS-backed instances. You can only terminate instance store-backed instances. What happens to an instance differs if you stop it or terminate it. For example, when you stop an instance, the root device and any other devices attached to the instance persist. When you terminate an instance, any attached EBS volumes with the DeleteOnTermination block device mapping parameter set to true are automatically deleted. For more information about the differences between stopping and terminating instances, see Instance lifecycle in the Amazon EC2 User Guide.

For more information about troubleshooting, see Troubleshooting terminating your instance in the Amazon EC2 User Guide.

", + "TerminateInstances": "

Shuts down the specified instances. This operation is idempotent; if you terminate an instance more than once, each call succeeds.

If you specify multiple instances and the request fails (for example, because of a single incorrect instance ID), none of the instances are terminated.

If you terminate multiple instances across multiple Availability Zones, and one or more of the specified instances are enabled for termination protection, the request fails with the following results:

For example, say you have the following instances:

If you attempt to terminate all of these instances in the same request, the request reports failure with the following results:

Terminated instances remain visible after termination (for approximately one hour).

By default, Amazon EC2 deletes all EBS volumes that were attached when the instance launched. Volumes attached after instance launch continue running.

You can stop, start, and terminate EBS-backed instances. You can only terminate instance store-backed instances. What happens to an instance differs if you stop it or terminate it. For example, when you stop an instance, the root device and any other devices attached to the instance persist. When you terminate an instance, any attached EBS volumes with the DeleteOnTermination block device mapping parameter set to true are automatically deleted. For more information about the differences between stopping and terminating instances, see Instance lifecycle in the Amazon EC2 User Guide.

For more information about troubleshooting, see Troubleshooting terminating your instance in the Amazon EC2 User Guide.

", "UnassignIpv6Addresses": "

Unassigns one or more IPv6 addresses from a network interface.

", "UnassignPrivateIpAddresses": "

Unassigns one or more secondary private IP addresses from a network interface.

", "UnmonitorInstances": "

Disables detailed monitoring for a running instance. For more information, see Monitoring your instances and volumes in the Amazon EC2 User Guide.

", @@ -1332,7 +1332,7 @@ "BlobAttributeValue": { "base": null, "refs": { - "ModifyInstanceAttributeRequest$UserData": "

Changes the instance's user data to the specified value. If you are using an AWS SDK or command line tool, base64-encoding is performed for you, and you can load the text from a file. Otherwise, you must provide base64-encoded text.

" + "ModifyInstanceAttributeRequest$UserData": "

Changes the instance's user data to the specified value. If you are using an Amazon Web Services SDK or command line tool, base64-encoding is performed for you, and you can load the text from a file. Otherwise, you must provide base64-encoded text.

" } }, "BlockDeviceMapping": { @@ -1724,8 +1724,8 @@ "EnableVpcClassicLinkDnsSupportResult$Return": "

Returns true if the request succeeds; otherwise, it returns an error.

", "EnableVpcClassicLinkRequest$DryRun": "

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", "EnableVpcClassicLinkResult$Return": "

Returns true if the request succeeds; otherwise, it returns an error.

", - "EnclaveOptions$Enabled": "

If this parameter is set to true, the instance is enabled for AWS Nitro Enclaves; otherwise, it is not enabled for AWS Nitro Enclaves.

", - "EnclaveOptionsRequest$Enabled": "

To enable the instance for AWS Nitro Enclaves, set this parameter to true.

", + "EnclaveOptions$Enabled": "

If this parameter is set to true, the instance is enabled for Amazon Web Services Nitro Enclaves; otherwise, it is not enabled for Amazon Web Services Nitro Enclaves.

", + "EnclaveOptionsRequest$Enabled": "

To enable the instance for Amazon Web Services Nitro Enclaves, set this parameter to true.

", "ExportClientVpnClientCertificateRevocationListRequest$DryRun": "

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", "ExportClientVpnClientConfigurationRequest$DryRun": "

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", "ExportImageRequest$DryRun": "

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", @@ -1921,7 +1921,7 @@ "RequestSpotFleetRequest$DryRun": "

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", "RequestSpotInstancesRequest$DryRun": "

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", "RequestSpotLaunchSpecification$EbsOptimized": "

Indicates whether the instance is optimized for EBS I/O. This optimization provides dedicated throughput to Amazon EBS and an optimized configuration stack to provide optimal EBS I/O performance. This optimization isn't available with all instance types. Additional usage charges apply when using an EBS Optimized instance.

Default: false

", - "ReservedInstancesOffering$Marketplace": "

Indicates whether the offering is available through the Reserved Instance Marketplace (resale) or AWS. If it's a Reserved Instance Marketplace offering, this is true.

", + "ReservedInstancesOffering$Marketplace": "

Indicates whether the offering is available through the Reserved Instance Marketplace (resale) or Amazon Web Services. If it's a Reserved Instance Marketplace offering, this is true.

", "ResetAddressAttributeRequest$DryRun": "

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", "ResetEbsDefaultKmsKeyIdRequest$DryRun": "

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", "ResetFpgaImageAttributeRequest$DryRun": "

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", @@ -2347,8 +2347,8 @@ "CapacityReservationTenancy": { "base": null, "refs": { - "CapacityReservation$Tenancy": "

Indicates the tenancy of the Capacity Reservation. A Capacity Reservation can have one of the following tenancy settings:

", - "CreateCapacityReservationRequest$Tenancy": "

Indicates the tenancy of the Capacity Reservation. A Capacity Reservation can have one of the following tenancy settings:

" + "CapacityReservation$Tenancy": "

Indicates the tenancy of the Capacity Reservation. A Capacity Reservation can have one of the following tenancy settings:

", + "CreateCapacityReservationRequest$Tenancy": "

Indicates the tenancy of the Capacity Reservation. A Capacity Reservation can have one of the following tenancy settings:

" } }, "CarrierGateway": { @@ -2982,7 +2982,7 @@ "CreateFleetErrorsSet": { "base": null, "refs": { - "CreateFleetResult$Errors": "

Information about the instances that could not be launched by the fleet. Valid only when Type is set to instant.

" + "CreateFleetResult$Errors": "

Information about the instances that could not be launched by the fleet. Supported only for fleets of type instant.

" } }, "CreateFleetInstance": { @@ -2994,7 +2994,7 @@ "CreateFleetInstancesSet": { "base": null, "refs": { - "CreateFleetResult$Instances": "

Information about the instances that were launched by the fleet. Valid only when Type is set to instant.

" + "CreateFleetResult$Instances": "

Information about the instances that were launched by the fleet. Supported only for fleets of type instant.

" } }, "CreateFleetRequest": { @@ -6623,16 +6623,16 @@ } }, "EnclaveOptions": { - "base": "

Indicates whether the instance is enabled for AWS Nitro Enclaves.

", + "base": "

Indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves.

", "refs": { - "Instance$EnclaveOptions": "

Indicates whether the instance is enabled for AWS Nitro Enclaves.

", - "InstanceAttribute$EnclaveOptions": "

To enable the instance for AWS Nitro Enclaves, set this parameter to true; otherwise, set it to false.

" + "Instance$EnclaveOptions": "

Indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves.

", + "InstanceAttribute$EnclaveOptions": "

To enable the instance for Amazon Web Services Nitro Enclaves, set this parameter to true; otherwise, set it to false.

" } }, "EnclaveOptionsRequest": { - "base": "

Indicates whether the instance is enabled for AWS Nitro Enclaves. For more information, see What is AWS Nitro Enclaves? in the AWS Nitro Enclaves User Guide.

", + "base": "

Indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves. For more information, see What is Amazon Web Services Nitro Enclaves? in the Amazon Web Services Nitro Enclaves User Guide.

", "refs": { - "RunInstancesRequest$EnclaveOptions": "

Indicates whether the instance is enabled for AWS Nitro Enclaves. For more information, see What is AWS Nitro Enclaves? in the AWS Nitro Enclaves User Guide.

You can't enable AWS Nitro Enclaves and hibernation on the same instance.

" + "RunInstancesRequest$EnclaveOptions": "

Indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves. For more information, see What is Amazon Web Services Nitro Enclaves? in the Amazon Web Services Nitro Enclaves User Guide.

You can't enable Amazon Web Services Nitro Enclaves and hibernation on the same instance.

" } }, "EndDateType": { @@ -6883,7 +6883,7 @@ "DescribeAddressesRequest$Filters": "

One or more filters. Filter names and values are case-sensitive.

", "DescribeAvailabilityZonesRequest$Filters": "

The filters.

", "DescribeBundleTasksRequest$Filters": "

The filters.

", - "DescribeCapacityReservationsRequest$Filters": "

One or more filters.

", + "DescribeCapacityReservationsRequest$Filters": "

One or more filters.

", "DescribeCarrierGatewaysRequest$Filters": "

One or more filters.

", "DescribeClassicLinkInstancesRequest$Filters": "

One or more filters.

", "DescribeClientVpnAuthorizationRulesRequest$Filters": "

One or more filters. Filter names and values are case-sensitive.

", @@ -6914,7 +6914,7 @@ "DescribeInstanceStatusRequest$Filters": "

The filters.

", "DescribeInstanceTypeOfferingsRequest$Filters": "

One or more filters. Filter names and values are case-sensitive.

", "DescribeInstanceTypesRequest$Filters": "

One or more filters. Filter names and values are case-sensitive.

", - "DescribeInstancesRequest$Filters": "

The filters.

", + "DescribeInstancesRequest$Filters": "

The filters.

", "DescribeInternetGatewaysRequest$Filters": "

One or more filters.

", "DescribeIpv6PoolsRequest$Filters": "

One or more filters.

", "DescribeKeyPairsRequest$Filters": "

The filters.

", @@ -6941,7 +6941,7 @@ "DescribeReplaceRootVolumeTasksRequest$Filters": "

Filter to use:

", "DescribeReservedInstancesListingsRequest$Filters": "

One or more filters.

", "DescribeReservedInstancesModificationsRequest$Filters": "

One or more filters.

", - "DescribeReservedInstancesOfferingsRequest$Filters": "

One or more filters.

", + "DescribeReservedInstancesOfferingsRequest$Filters": "

One or more filters.

", "DescribeReservedInstancesRequest$Filters": "

One or more filters.

", "DescribeRouteTablesRequest$Filters": "

One or more filters.

", "DescribeScheduledInstanceAvailabilityRequest$Filters": "

The filters.

", @@ -7166,7 +7166,7 @@ "FleetType": { "base": null, "refs": { - "CreateFleetRequest$Type": "

The type of request. The default value is maintain.

For more information, see EC2 Fleet request types in the Amazon EC2 User Guide.

", + "CreateFleetRequest$Type": "

The fleet type. The default value is maintain.

For more information, see EC2 Fleet request types in the Amazon EC2 User Guide.

", "FleetData$Type": "

The type of request. Indicates whether the EC2 Fleet only requests the target capacity, or also attempts to maintain it. If you request a certain target capacity, EC2 Fleet only places the required requests; it does not attempt to replenish instances if capacity is diminished, and it does not submit requests in alternative capacity pools if capacity is unavailable. To maintain a certain target capacity, EC2 Fleet places the required requests to meet this target capacity. It also automatically replenishes any interrupted Spot Instances. Default: maintain.

", "SpotFleetRequestConfigData$Type": "

The type of request. Indicates whether the Spot Fleet only requests the target capacity or also attempts to maintain it. When this value is request, the Spot Fleet only places the required requests. It does not attempt to replenish Spot Instances if capacity is diminished, nor does it submit requests in alternative Spot pools if capacity is not available. When this value is maintain, the Spot Fleet maintains the target capacity. The Spot Fleet places the required requests to meet capacity and automatically replenishes any interrupted instances. Default: maintain. instant is listed but is not used by Spot Fleet.

" } @@ -7693,7 +7693,7 @@ "HibernationOptionsRequest": { "base": "

Indicates whether your instance is configured for hibernation. This parameter is valid only if the instance meets the hibernation prerequisites. For more information, see Hibernate your instance in the Amazon EC2 User Guide.

", "refs": { - "RunInstancesRequest$HibernationOptions": "

Indicates whether an instance is enabled for hibernation. For more information, see Hibernate your instance in the Amazon EC2 User Guide.

You can't enable hibernation and AWS Nitro Enclaves on the same instance.

" + "RunInstancesRequest$HibernationOptions": "

Indicates whether an instance is enabled for hibernation. For more information, see Hibernate your instance in the Amazon EC2 User Guide.

You can't enable hibernation and Amazon Web Services Nitro Enclaves on the same instance.

" } }, "HistoryRecord": { @@ -8836,7 +8836,7 @@ "InstanceNetworkInterfaceSpecification$SecondaryPrivateIpAddressCount": "

The number of secondary private IPv4 addresses. You can't specify this option and specify more than one private IP address using the private IP addresses option. You cannot specify this option if you're launching more than one instance in a RunInstances request.

", "InstanceNetworkInterfaceSpecification$NetworkCardIndex": "

The index of the network card. Some instance types support multiple network cards. The primary network interface must be assigned to network card index 0. The default is network card index 0.

", "InstanceState$Code": "

The state of the instance as a 16-bit unsigned integer.

The high byte is all of the bits between 2^8 and (2^16)-1, which equals decimal values between 256 and 65,535. These numerical values are used for internal purposes and should be ignored.

The low byte is all of the bits between 2^0 and (2^8)-1, which equals decimal values between 0 and 255.

The valid values for instance-state-code will all be in the range of the low byte and they are:

You can ignore the high byte value by zeroing out all of the bits above 2^8 or 256 in decimal.

", - "InstanceUsage$UsedInstanceCount": "

The number of instances the AWS account currently has in the Capacity Reservation.

", + "InstanceUsage$UsedInstanceCount": "

The number of instances the account currently has in the Capacity Reservation.

", "IpPermission$FromPort": "

The start of port range for the TCP and UDP protocols, or an ICMP/ICMPv6 type number. A value of -1 indicates all ICMP/ICMPv6 types. If you specify all ICMP/ICMPv6 types, you must specify all codes.

", "IpPermission$ToPort": "

The end of port range for the TCP and UDP protocols, or an ICMP/ICMPv6 code. A value of -1 indicates all ICMP/ICMPv6 codes. If you specify all ICMP/ICMPv6 types, you must specify all codes.

", "LaunchTemplateCpuOptions$CoreCount": "

The number of CPU cores for the instance.

", @@ -8914,7 +8914,7 @@ "PurchaseRequest$InstanceCount": "

The number of instances.

", "PurchaseReservedInstancesOfferingRequest$InstanceCount": "

The number of Reserved Instances to purchase.

", "ReplaceNetworkAclEntryRequest$RuleNumber": "

The rule number of the entry to replace.

", - "RequestSpotInstancesRequest$BlockDurationMinutes": "

The required duration for the Spot Instances (also known as Spot blocks), in minutes. This value must be a multiple of 60 (60, 120, 180, 240, 300, or 360).

The duration period starts as soon as your Spot Instance receives its instance ID. At the end of the duration period, Amazon EC2 marks the Spot Instance for termination and provides a Spot Instance termination notice, which gives the instance a two-minute warning before it terminates.

You can't specify an Availability Zone group or a launch group if you specify a duration.

New accounts or accounts with no previous billing history with AWS are not eligible for Spot Instances with a defined duration (also known as Spot blocks).

", + "RequestSpotInstancesRequest$BlockDurationMinutes": "

Deprecated.

", "RequestSpotInstancesRequest$InstanceCount": "

The maximum number of Spot Instances to launch.

Default: 1

", "ReservedInstances$InstanceCount": "

The number of reservations purchased.

", "ReservedInstancesConfiguration$InstanceCount": "

The number of modified Reserved Instances.

This is a required field for a request.

", @@ -8945,12 +8945,12 @@ "SnapshotInfo$VolumeSize": "

Size of the volume from which this snapshot was created.

", "SpotFleetRequestConfigData$TargetCapacity": "

The number of units to request for the Spot Fleet. You can choose to set the target capacity in terms of instances or a performance characteristic that is important to your application workload, such as vCPUs, memory, or I/O. If the request type is maintain, you can specify a target capacity of 0 and add capacity later.

", "SpotFleetRequestConfigData$OnDemandTargetCapacity": "

The number of On-Demand units to request. You can choose to set the target capacity in terms of instances or a performance characteristic that is important to your application workload, such as vCPUs, memory, or I/O. If the request type is maintain, you can specify a target capacity of 0 and add capacity later.

", - "SpotFleetRequestConfigData$InstancePoolsToUseCount": "

The number of Spot pools across which to allocate your target Spot capacity. Valid only when Spot AllocationStrategy is set to lowest-price. Spot Fleet selects the cheapest Spot pools and evenly allocates your target Spot capacity across the number of Spot pools that you specify.

", - "SpotInstanceRequest$BlockDurationMinutes": "

The duration for the Spot Instance, in minutes.

", - "SpotMarketOptions$BlockDurationMinutes": "

The required duration for the Spot Instances (also known as Spot blocks), in minutes. This value must be a multiple of 60 (60, 120, 180, 240, 300, or 360).

The duration period starts as soon as your Spot Instance receives its instance ID. At the end of the duration period, Amazon EC2 marks the Spot Instance for termination and provides a Spot Instance termination notice, which gives the instance a two-minute warning before it terminates.

You can't specify an Availability Zone group or a launch group if you specify a duration.

New accounts or accounts with no previous billing history with AWS are not eligible for Spot Instances with a defined duration (also known as Spot blocks).

", - "SpotOptions$InstancePoolsToUseCount": "

The number of Spot pools across which to allocate your target Spot capacity. Valid only when AllocationStrategy is set to lowest-price. EC2 Fleet selects the cheapest Spot pools and evenly allocates your target Spot capacity across the number of Spot pools that you specify.

", + "SpotFleetRequestConfigData$InstancePoolsToUseCount": "

The number of Spot pools across which to allocate your target Spot capacity. Valid only when Spot AllocationStrategy is set to lowest-price. Spot Fleet selects the cheapest Spot pools and evenly allocates your target Spot capacity across the number of Spot pools that you specify.

Note that Spot Fleet attempts to draw Spot Instances from the number of pools that you specify on a best effort basis. If a pool runs out of Spot capacity before fulfilling your target capacity, Spot Fleet will continue to fulfill your request by drawing from the next cheapest pool. To ensure that your target capacity is met, you might receive Spot Instances from more than the number of pools that you specified. Similarly, if most of the pools have no Spot capacity, you might receive your full target capacity from fewer than the number of pools that you specified.

", + "SpotInstanceRequest$BlockDurationMinutes": "

Deprecated.

", + "SpotMarketOptions$BlockDurationMinutes": "

The required duration for the Spot Instances (also known as Spot blocks), in minutes. This value must be a multiple of 60 (60, 120, 180, 240, 300, or 360).

The duration period starts as soon as your Spot Instance receives its instance ID. At the end of the duration period, Amazon EC2 marks the Spot Instance for termination and provides a Spot Instance termination notice, which gives the instance a two-minute warning before it terminates.

You can't specify an Availability Zone group or a launch group if you specify a duration.

New accounts or accounts with no previous billing history with Amazon Web Services are not eligible for Spot Instances with a defined duration (also known as Spot blocks).

", + "SpotOptions$InstancePoolsToUseCount": "

The number of Spot pools across which to allocate your target Spot capacity. Valid only when AllocationStrategy is set to lowest-price. EC2 Fleet selects the cheapest Spot pools and evenly allocates your target Spot capacity across the number of Spot pools that you specify.

Note that EC2 Fleet attempts to draw Spot Instances from the number of pools that you specify on a best effort basis. If a pool runs out of Spot capacity before fulfilling your target capacity, EC2 Fleet will continue to fulfill your request by drawing from the next cheapest pool. To ensure that your target capacity is met, you might receive Spot Instances from more than the number of pools that you specified. Similarly, if most of the pools have no Spot capacity, you might receive your full target capacity from fewer than the number of pools that you specified.

", "SpotOptions$MinTargetCapacity": "

The minimum target capacity for Spot Instances in the fleet. If the minimum target capacity is not reached, the fleet launches no instances.

", - "SpotOptionsRequest$InstancePoolsToUseCount": "

The number of Spot pools across which to allocate your target Spot capacity. Valid only when Spot AllocationStrategy is set to lowest-price. EC2 Fleet selects the cheapest Spot pools and evenly allocates your target Spot capacity across the number of Spot pools that you specify.

", + "SpotOptionsRequest$InstancePoolsToUseCount": "

The number of Spot pools across which to allocate your target Spot capacity. Valid only when Spot AllocationStrategy is set to lowest-price. EC2 Fleet selects the cheapest Spot pools and evenly allocates your target Spot capacity across the number of Spot pools that you specify.

Note that EC2 Fleet attempts to draw Spot Instances from the number of pools that you specify on a best effort basis. If a pool runs out of Spot capacity before fulfilling your target capacity, EC2 Fleet will continue to fulfill your request by drawing from the next cheapest pool. To ensure that your target capacity is met, you might receive Spot Instances from more than the number of pools that you specified. Similarly, if most of the pools have no Spot capacity, you might receive your full target capacity from fewer than the number of pools that you specified.

", "SpotOptionsRequest$MinTargetCapacity": "

The minimum target capacity for Spot Instances in the fleet. If the minimum target capacity is not reached, the fleet launches no instances.

", "StaleIpPermission$FromPort": "

The start of the port range for the TCP and UDP protocols, or an ICMP type number. A value of -1 indicates all ICMP types.

", "StaleIpPermission$ToPort": "

The end of the port range for the TCP and UDP protocols, or an ICMP type number. A value of -1 indicates all ICMP types.

", @@ -11945,7 +11945,7 @@ "RegisterImageRequest$RamdiskId": "

The ID of the RAM disk.

", "RequestLaunchTemplateData$RamDiskId": "

The ID of the RAM disk.

We recommend that you use PV-GRUB instead of kernels and RAM disks. For more information, see User Provided Kernels in the Amazon Elastic Compute Cloud User Guide.

", "RequestSpotLaunchSpecification$RamdiskId": "

The ID of the RAM disk.

", - "RunInstancesRequest$RamdiskId": "

The ID of the RAM disk to select. Some kernels require additional drivers at launch. Check the kernel requirements for information about whether you need to specify a RAM disk. To find kernel requirements, go to the AWS Resource Center and search for the kernel ID.

We recommend that you use PV-GRUB instead of kernels and RAM disks. For more information, see PV-GRUB in the Amazon EC2 User Guide.

", + "RunInstancesRequest$RamdiskId": "

The ID of the RAM disk to select. Some kernels require additional drivers at launch. Check the kernel requirements for information about whether you need to specify a RAM disk. To find kernel requirements, go to the Amazon Web Services Resource Center and search for the kernel ID.

We recommend that you use PV-GRUB instead of kernels and RAM disks. For more information, see PV-GRUB in the Amazon EC2 User Guide.

", "ScheduledInstancesLaunchSpecification$RamdiskId": "

The ID of the RAM disk.

" } }, @@ -13710,13 +13710,13 @@ "CancelSpotFleetRequestsSuccessItem$SpotFleetRequestId": "

The ID of the Spot Fleet request.

", "CancelledSpotInstanceRequest$SpotInstanceRequestId": "

The ID of the Spot Instance request.

", "CapacityReservation$CapacityReservationId": "

The ID of the Capacity Reservation.

", - "CapacityReservation$OwnerId": "

The ID of the AWS account that owns the Capacity Reservation.

", + "CapacityReservation$OwnerId": "

The ID of the account that owns the Capacity Reservation.

", "CapacityReservation$CapacityReservationArn": "

The Amazon Resource Name (ARN) of the Capacity Reservation.

", "CapacityReservation$AvailabilityZoneId": "

The Availability Zone ID of the Capacity Reservation.

", "CapacityReservation$InstanceType": "

The type of instance for which the Capacity Reservation reserves capacity.

", "CapacityReservation$AvailabilityZone": "

The Availability Zone in which the capacity is reserved.

", "CapacityReservationGroup$GroupArn": "

The ARN of the resource group.

", - "CapacityReservationGroup$OwnerId": "

The ID of the AWS account that owns the resource group.

", + "CapacityReservationGroup$OwnerId": "

The ID of the account that owns the resource group.

", "CapacityReservationTarget$CapacityReservationResourceGroupArn": "

The ARN of the Capacity Reservation resource group in which to run the instance.

", "CapacityReservationTargetResponse$CapacityReservationId": "

The ID of the targeted Capacity Reservation.

", "CapacityReservationTargetResponse$CapacityReservationResourceGroupArn": "

The ARN of the targeted Capacity Reservation group.

", @@ -13770,7 +13770,7 @@ "CoipAddressUsage$AwsService": "

The AWS service.

", "CoipAddressUsage$CoIp": "

The customer-owned IP address.

", "ConfirmProductInstanceRequest$ProductCode": "

The product code. This must be a product code that you own.

", - "ConfirmProductInstanceResult$OwnerId": "

The AWS account ID of the instance owner. This is only present if the product code is attached to the instance.

", + "ConfirmProductInstanceResult$OwnerId": "

The account ID of the instance owner. This is only present if the product code is attached to the instance.

", "ConnectionLogOptions$CloudwatchLogGroup": "

The name of the CloudWatch Logs log group. Required if connection logging is enabled.

", "ConnectionLogOptions$CloudwatchLogStream": "

The name of the CloudWatch Logs log stream to which the connection data is published.

", "ConnectionLogResponseOptions$CloudwatchLogGroup": "

The name of the Amazon CloudWatch Logs log group to which connection logging data is published.

", @@ -13825,6 +13825,7 @@ "CreateFleetError$ErrorCode": "

The error code that indicates why the instance could not be launched. For more information about error codes, see Error Codes.

", "CreateFleetError$ErrorMessage": "

The error message that describes why the instance could not be launched. For more information about error messages, see Error Codes.

", "CreateFleetRequest$ClientToken": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring Idempotency.

", + "CreateFleetRequest$Context": "

Reserved.

", "CreateFlowLogsRequest$ClientToken": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to Ensure Idempotency.

", "CreateFlowLogsRequest$DeliverLogsPermissionArn": "

The ARN for the IAM role that permits Amazon EC2 to publish flow logs to a CloudWatch Logs log group in your account.

If you specify LogDestinationType as s3, do not specify DeliverLogsPermissionArn or LogGroupName.

", "CreateFlowLogsRequest$LogGroupName": "

The name of a new or existing CloudWatch Logs log group where Amazon EC2 publishes your flow logs.

If you specify LogDestinationType as s3, do not specify DeliverLogsPermissionArn or LogGroupName.

", @@ -14230,6 +14231,7 @@ "FederatedAuthenticationRequest$SelfServiceSAMLProviderArn": "

The Amazon Resource Name (ARN) of the IAM SAML identity provider for the self-service portal.

", "Filter$Name": "

The name of the filter. Filter names are case-sensitive.

", "FleetData$ClientToken": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring Idempotency.

Constraints: Maximum 64 ASCII characters

", + "FleetData$Context": "

Reserved.

", "FleetLaunchTemplateOverrides$MaxPrice": "

The maximum price per unit hour that you are willing to pay for a Spot Instance.

", "FleetLaunchTemplateOverrides$SubnetId": "

The ID of the subnet in which to launch the instances.

", "FleetLaunchTemplateOverrides$AvailabilityZone": "

The Availability Zone in which to launch the instances.

", @@ -14300,11 +14302,11 @@ "Host$ClientToken": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring Idempotency.

", "Host$HostId": "

The ID of the Dedicated Host.

", "Host$HostReservationId": "

The reservation ID of the Dedicated Host. This returns a null response if the Dedicated Host doesn't have an associated reservation.

", - "Host$OwnerId": "

The ID of the AWS account that owns the Dedicated Host.

", + "Host$OwnerId": "

The ID of the account that owns the Dedicated Host.

", "Host$AvailabilityZoneId": "

The ID of the Availability Zone in which the Dedicated Host is allocated.

", "HostInstance$InstanceId": "

The ID of instance that is running on the Dedicated Host.

", "HostInstance$InstanceType": "

The instance type (for example, m3.medium) of the running instance.

", - "HostInstance$OwnerId": "

The ID of the AWS account that owns the instance.

", + "HostInstance$OwnerId": "

The ID of the account that owns the instance.

", "HostOffering$HourlyPrice": "

The hourly price of the offering.

", "HostOffering$InstanceFamily": "

The instance family of the offering.

", "HostOffering$OfferingId": "

The ID of the offering.

", @@ -14458,7 +14460,7 @@ "InstanceStatus$InstanceId": "

The ID of the instance.

", "InstanceStatusEvent$Description": "

A description of the event.

After a scheduled event is completed, it can still be described for up to a week. If the event has been completed, this description starts with the following text: [Completed].

", "InstanceTagKeySet$member": null, - "InstanceUsage$AccountId": "

The ID of the AWS account that is making use of the Capacity Reservation.

", + "InstanceUsage$AccountId": "

The ID of the account that is making use of the Capacity Reservation.

", "InternetGateway$InternetGatewayId": "

The ID of the internet gateway.

", "InternetGateway$OwnerId": "

The ID of the AWS account that owns the internet gateway.

", "InternetGatewayAttachment$VpcId": "

The ID of the VPC.

", @@ -14568,6 +14570,7 @@ "ModifyClientVpnEndpointRequest$Description": "

A brief description of the Client VPN endpoint.

", "ModifyDefaultCreditSpecificationRequest$CpuCredits": "

The credit option for CPU usage of the instance family.

Valid Values: standard | unlimited

", "ModifyEbsDefaultKmsKeyIdResult$KmsKeyId": "

The Amazon Resource Name (ARN) of the default CMK for encryption by default.

", + "ModifyFleetRequest$Context": "

Reserved.

", "ModifyFpgaImageAttributeRequest$Description": "

A description for the AFI.

", "ModifyFpgaImageAttributeRequest$Name": "

A name for the AFI.

", "ModifyHostsRequest$InstanceType": "

Specifies the instance type to be supported by the Dedicated Host. Specify this parameter to modify a Dedicated Host to support only a specific instance type.

If you want to modify a Dedicated Host to support multiple instance types in its current instance family, omit this parameter and specify InstanceFamily instead. You cannot specify InstanceType and InstanceFamily in the same request.

", @@ -14587,6 +14590,7 @@ "ModifyManagedPrefixListRequest$PrefixListName": "

A name for the prefix list.

", "ModifyReservedInstancesRequest$ClientToken": "

A unique, case-sensitive token you provide to ensure idempotency of your modification request. For more information, see Ensuring Idempotency.

", "ModifyReservedInstancesResult$ReservedInstancesModificationId": "

The ID for the modification.

", + "ModifySpotFleetRequestRequest$Context": "

Reserved.

", "ModifyTrafficMirrorFilterRuleRequest$DestinationCidrBlock": "

The destination CIDR block to assign to the Traffic Mirror rule.

", "ModifyTrafficMirrorFilterRuleRequest$SourceCidrBlock": "

The source CIDR block to assign to the Traffic Mirror rule.

", "ModifyTrafficMirrorFilterRuleRequest$Description": "

The description to assign to the Traffic Mirror rule.

", @@ -14734,7 +14738,7 @@ "PurchaseHostReservationResult$TotalHourlyPrice": "

The total hourly price of the reservation calculated per hour.

", "PurchaseHostReservationResult$TotalUpfrontPrice": "

The total amount charged to your account when you purchase the reservation.

", "PurchaseRequest$PurchaseToken": "

The purchase token.

", - "PurchaseReservedInstancesOfferingResult$ReservedInstancesId": "

The IDs of the purchased Reserved Instances.

", + "PurchaseReservedInstancesOfferingResult$ReservedInstancesId": "

The IDs of the purchased Reserved Instances. If your purchase crosses into a discounted pricing tier, the final Reserved Instances IDs might change. For more information, see Crossing pricing tiers in the Amazon Elastic Compute Cloud User Guide.

", "PurchaseScheduledInstancesRequest$ClientToken": "

Unique, case-sensitive identifier that ensures the idempotency of the request. For more information, see Ensuring Idempotency.

", "Region$Endpoint": "

The Region service endpoint.

", "Region$RegionName": "

The name of the Region.

", @@ -14773,8 +14777,8 @@ "RequestSpotLaunchSpecification$AddressingType": "

Deprecated.

", "RequestSpotLaunchSpecification$UserData": "

The Base64-encoded user data for the instance. User data is limited to 16 KB.

", "RequestSpotLaunchSpecificationSecurityGroupList$member": null, - "Reservation$OwnerId": "

The ID of the AWS account that owns the reservation.

", - "Reservation$RequesterId": "

The ID of the requester that launched the instances on your behalf (for example, AWS Management Console or Auto Scaling).

", + "Reservation$OwnerId": "

The ID of the account that owns the reservation.

", + "Reservation$RequesterId": "

The ID of the requester that launched the instances on your behalf (for example, Management Console or Auto Scaling).

", "Reservation$ReservationId": "

The ID of the reservation.

", "ReservationValue$HourlyPrice": "

The hourly rate of the reservation.

", "ReservationValue$RemainingTotalValue": "

The balance of the total value (the sum of remainingUpfrontValue + hourlyPrice * number of hours remaining).

", @@ -14936,23 +14940,24 @@ "SnapshotTaskDetail$StatusMessage": "

A detailed status message for the import snapshot task.

", "SnapshotTaskDetail$Url": "

The URL of the disk image from which the snapshot is created.

", "SpotDatafeedSubscription$Bucket": "

The name of the Amazon S3 bucket where the Spot Instance data feed is located.

", - "SpotDatafeedSubscription$OwnerId": "

The AWS account ID of the account.

", + "SpotDatafeedSubscription$OwnerId": "

The account ID of the account.

", "SpotDatafeedSubscription$Prefix": "

The prefix for the data feed files.

", "SpotFleetLaunchSpecification$AddressingType": "

Deprecated.

", "SpotFleetLaunchSpecification$ImageId": "

The ID of the AMI.

", "SpotFleetLaunchSpecification$KernelId": "

The ID of the kernel.

", "SpotFleetLaunchSpecification$KeyName": "

The name of the key pair.

", - "SpotFleetLaunchSpecification$RamdiskId": "

The ID of the RAM disk. Some kernels require additional drivers at launch. Check the kernel requirements for information about whether you need to specify a RAM disk. To find kernel requirements, refer to the AWS Resource Center and search for the kernel ID.

", + "SpotFleetLaunchSpecification$RamdiskId": "

The ID of the RAM disk. Some kernels require additional drivers at launch. Check the kernel requirements for information about whether you need to specify a RAM disk. To find kernel requirements, refer to the Amazon Web Services Resource Center and search for the kernel ID.

", "SpotFleetLaunchSpecification$SpotPrice": "

The maximum price per unit hour that you are willing to pay for a Spot Instance. If this value is not specified, the default is the Spot price specified for the fleet. To determine the Spot price per unit hour, divide the Spot price by the value of WeightedCapacity.

", "SpotFleetLaunchSpecification$SubnetId": "

The IDs of the subnets in which to launch the instances. To specify multiple subnets, separate them using commas; for example, \"subnet-1234abcdeexample1, subnet-0987cdef6example2\".

", "SpotFleetLaunchSpecification$UserData": "

The Base64-encoded user data that instances use when starting up.

", "SpotFleetRequestConfig$SpotFleetRequestId": "

The ID of the Spot Fleet request.

", "SpotFleetRequestConfigData$ClientToken": "

A unique, case-sensitive identifier that you provide to ensure the idempotency of your listings. This helps to avoid duplicate listings. For more information, see Ensuring Idempotency.

", - "SpotFleetRequestConfigData$IamFleetRole": "

The Amazon Resource Name (ARN) of an AWS Identity and Access Management (IAM) role that grants the Spot Fleet the permission to request, launch, terminate, and tag instances on your behalf. For more information, see Spot Fleet prerequisites in the Amazon EC2 User Guide for Linux Instances. Spot Fleet can terminate Spot Instances on your behalf when you cancel its Spot Fleet request using CancelSpotFleetRequests or when the Spot Fleet request expires, if you set TerminateInstancesWithExpiration.

", + "SpotFleetRequestConfigData$IamFleetRole": "

The Amazon Resource Name (ARN) of an Identity and Access Management (IAM) role that grants the Spot Fleet the permission to request, launch, terminate, and tag instances on your behalf. For more information, see Spot Fleet prerequisites in the Amazon EC2 User Guide for Linux Instances. Spot Fleet can terminate Spot Instances on your behalf when you cancel its Spot Fleet request using CancelSpotFleetRequests or when the Spot Fleet request expires, if you set TerminateInstancesWithExpiration.

", "SpotFleetRequestConfigData$SpotPrice": "

The maximum price per unit hour that you are willing to pay for a Spot Instance. The default is the On-Demand price.

", "SpotFleetRequestConfigData$OnDemandMaxTotalPrice": "

The maximum amount per hour for On-Demand Instances that you're willing to pay. You can use the onDemandMaxTotalPrice parameter, the spotMaxTotalPrice parameter, or both parameters to ensure that your fleet cost does not exceed your budget. If you set a maximum price per hour for the On-Demand Instances and Spot Instances in your request, Spot Fleet will launch instances until it reaches the maximum amount you're willing to pay. When the maximum amount you're willing to pay is reached, the fleet stops launching instances even if it hasn’t met the target capacity.

", "SpotFleetRequestConfigData$SpotMaxTotalPrice": "

The maximum amount per hour for Spot Instances that you're willing to pay. You can use the spotMaxTotalPrice parameter, the onDemandMaxTotalPrice parameter, or both parameters to ensure that your fleet cost does not exceed your budget. If you set a maximum price per hour for the On-Demand Instances and Spot Instances in your request, Spot Fleet will launch instances until it reaches the maximum amount you're willing to pay. When the maximum amount you're willing to pay is reached, the fleet stops launching instances even if it hasn’t met the target capacity.

", - "SpotInstanceRequest$ActualBlockHourlyPrice": "

If you specified a duration and your Spot Instance request was fulfilled, this is the fixed hourly price in effect for the Spot Instance while it runs.

", + "SpotFleetRequestConfigData$Context": "

Reserved.

", + "SpotInstanceRequest$ActualBlockHourlyPrice": "

Deprecated.

", "SpotInstanceRequest$AvailabilityZoneGroup": "

The Availability Zone group. If you specify the same Availability Zone group for all Spot Instance requests, all Spot Instances are launched in the same Availability Zone.

", "SpotInstanceRequest$LaunchGroup": "

The instance launch group. Launch groups are Spot Instances that launch together and terminate together.

", "SpotInstanceRequest$LaunchedAvailabilityZone": "

The Availability Zone in which the request is launched.

", @@ -15465,7 +15470,7 @@ "CreateCustomerGatewayRequest$TagSpecifications": "

The tags to apply to the customer gateway.

", "CreateDhcpOptionsRequest$TagSpecifications": "

The tags to assign to the DHCP option.

", "CreateEgressOnlyInternetGatewayRequest$TagSpecifications": "

The tags to assign to the egress-only internet gateway.

", - "CreateFleetRequest$TagSpecifications": "

The key-value pair for tagging the EC2 Fleet request on creation. The value for ResourceType must be fleet, otherwise the fleet request fails. To tag instances at launch, specify the tags in the launch template. For information about tagging after launch, see Tagging your resources.

", + "CreateFleetRequest$TagSpecifications": "

The key-value pair for tagging the EC2 Fleet request on creation. For more information, see Tagging your resources.

If the fleet type is instant, specify a resource type of fleet to tag the fleet or instance to tag the instances at launch.

If the fleet type is maintain or request, specify a resource type of fleet to tag the fleet. You cannot specify a resource type of instance. To tag instances at launch, specify the tags in a launch template.

", "CreateFlowLogsRequest$TagSpecifications": "

The tags to apply to the flow logs.

", "CreateFpgaImageRequest$TagSpecifications": "

The tags to apply to the FPGA image during creation.

", "CreateImageRequest$TagSpecifications": "

The tags to apply to the AMI and snapshots on creation. You can tag the AMI, the snapshots, or both.

If you specify other values for ResourceType, the request fails.

To tag an AMI or snapshot after it has been created, see CreateTags.

", diff --git a/models/apis/sagemaker/2017-07-24/api-2.json b/models/apis/sagemaker/2017-07-24/api-2.json index 268d167a81d..3d2a3907d22 100644 --- a/models/apis/sagemaker/2017-07-24/api-2.json +++ b/models/apis/sagemaker/2017-07-24/api-2.json @@ -850,7 +850,10 @@ "method":"POST", "requestUri":"/" }, - "input":{"shape":"DeleteModelPackageGroupInput"} + "input":{"shape":"DeleteModelPackageGroupInput"}, + "errors":[ + {"shape":"ConflictException"} + ] }, "DeleteModelPackageGroupPolicy":{ "name":"DeleteModelPackageGroupPolicy", @@ -916,7 +919,10 @@ "method":"POST", "requestUri":"/" }, - "input":{"shape":"DeleteProjectInput"} + "input":{"shape":"DeleteProjectInput"}, + "errors":[ + {"shape":"ConflictException"} + ] }, "DeleteTags":{ "name":"DeleteTags", @@ -10646,13 +10652,14 @@ "Image":{"shape":"ContainerImage"}, "ImageDigest":{"shape":"ImageDigest"}, "ModelDataUrl":{"shape":"Url"}, - "ProductId":{"shape":"ProductId"} + "ProductId":{"shape":"ProductId"}, + "Environment":{"shape":"EnvironmentMap"} } }, "ModelPackageContainerDefinitionList":{ "type":"list", "member":{"shape":"ModelPackageContainerDefinition"}, - "max":1, + "max":5, "min":1 }, "ModelPackageGroup":{ diff --git a/models/apis/sagemaker/2017-07-24/docs-2.json b/models/apis/sagemaker/2017-07-24/docs-2.json index 03472a86f67..521d6393634 100644 --- a/models/apis/sagemaker/2017-07-24/docs-2.json +++ b/models/apis/sagemaker/2017-07-24/docs-2.json @@ -3,51 +3,51 @@ "service": "

Provides APIs for creating and managing Amazon SageMaker resources.

Other Resources:

", "operations": { "AddAssociation": "

Creates an association between the source and the destination. A source can be associated with multiple destinations, and a destination can be associated with multiple sources. An association is a lineage tracking entity. For more information, see Amazon SageMaker ML Lineage Tracking.

", - "AddTags": "

Adds or overwrites one or more tags for the specified Amazon SageMaker resource. You can add tags to notebook instances, training jobs, hyperparameter tuning jobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations, and endpoints.

Each tag consists of a key and an optional value. Tag keys must be unique per resource. For more information about tags, see For more information, see AWS Tagging Strategies.

Tags that you add to a hyperparameter tuning job by calling this API are also added to any training jobs that the hyperparameter tuning job launches after you call this API, but not to training jobs that the hyperparameter tuning job launched before you called this API. To make sure that the tags associated with a hyperparameter tuning job are also added to all training jobs that the hyperparameter tuning job launches, add the tags when you first create the tuning job by specifying them in the Tags parameter of CreateHyperParameterTuningJob

Tags that you add to a SageMaker Studio Domain or User Profile by calling this API are also added to any Apps that the Domain or User Profile launches after you call this API, but not to Apps that the Domain or User Profile launched before you called this API. To make sure that the tags associated with a Domain or User Profile are also added to all Apps that the Domain or User Profile launches, add the tags when you first create the Domain or User Profile by specifying them in the Tags parameter of CreateDomain or CreateUserProfile.

", + "AddTags": "

Adds or overwrites one or more tags for the specified Amazon SageMaker resource. You can add tags to notebook instances, training jobs, hyperparameter tuning jobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations, and endpoints.

Each tag consists of a key and an optional value. Tag keys must be unique per resource. For more information about tags, see Amazon Web Services Tagging Strategies.

Tags that you add to a hyperparameter tuning job by calling this API are also added to any training jobs that the hyperparameter tuning job launches after you call this API, but not to training jobs that the hyperparameter tuning job launched before you called this API. To make sure that the tags associated with a hyperparameter tuning job are also added to all training jobs that the hyperparameter tuning job launches, add the tags when you first create the tuning job by specifying them in the Tags parameter of CreateHyperParameterTuningJob.

Tags that you add to a SageMaker Studio Domain or User Profile by calling this API are also added to any Apps that the Domain or User Profile launches after you call this API, but not to Apps that the Domain or User Profile launched before you called this API. To make sure that the tags associated with a Domain or User Profile are also added to all Apps that the Domain or User Profile launches, add the tags when you first create the Domain or User Profile by specifying them in the Tags parameter of CreateDomain or CreateUserProfile.

", "AssociateTrialComponent": "

Associates a trial component with a trial. A trial component can be associated with multiple trials. To disassociate a trial component from a trial, call the DisassociateTrialComponent API.

", "CreateAction": "

Creates an action. An action is a lineage tracking entity that represents an action or activity. For example, a model deployment or an HPO job. Generally, an action involves at least one input or output artifact. For more information, see Amazon SageMaker ML Lineage Tracking.

CreateAction can only be invoked from within an SageMaker managed environment. This includes SageMaker training jobs, processing jobs, transform jobs, and SageMaker notebooks. A call to CreateAction from outside one of these environments results in an error.

", - "CreateAlgorithm": "

Create a machine learning algorithm that you can use in Amazon SageMaker and list in the AWS Marketplace.

", + "CreateAlgorithm": "

Create a machine learning algorithm that you can use in Amazon SageMaker and list in the Amazon Web Services Marketplace.

", "CreateApp": "

Creates a running app for the specified UserProfile. Supported apps are JupyterServer and KernelGateway. This operation is automatically invoked by Amazon SageMaker Studio upon access to the associated Domain, and when new kernel configurations are selected by the user. A user may have multiple Apps active simultaneously.

", "CreateAppImageConfig": "

Creates a configuration for running a SageMaker image as a KernelGateway app. The configuration specifies the Amazon Elastic File System (EFS) storage volume on the image, and a list of the kernels in the image.

", "CreateArtifact": "

Creates an artifact. An artifact is a lineage tracking entity that represents a URI addressable object or data. Some examples are the S3 URI of a dataset and the ECR registry path of an image. For more information, see Amazon SageMaker ML Lineage Tracking.

CreateArtifact can only be invoked from within an SageMaker managed environment. This includes SageMaker training jobs, processing jobs, transform jobs, and SageMaker notebooks. A call to CreateArtifact from outside one of these environments results in an error.

", "CreateAutoMLJob": "

Creates an Autopilot job.

Find the best performing model after you run an Autopilot job by calling .

For information about how to use Autopilot, see Automate Model Development with Amazon SageMaker Autopilot.

", - "CreateCodeRepository": "

Creates a Git repository as a resource in your Amazon SageMaker account. You can associate the repository with notebook instances so that you can use Git source control for the notebooks you create. The Git repository is a resource in your Amazon SageMaker account, so it can be associated with more than one notebook instance, and it persists independently from the lifecycle of any notebook instances it is associated with.

The repository can be hosted either in AWS CodeCommit or in any other Git repository.

", - "CreateCompilationJob": "

Starts a model compilation job. After the model has been compiled, Amazon SageMaker saves the resulting model artifacts to an Amazon Simple Storage Service (Amazon S3) bucket that you specify.

If you choose to host your model using Amazon SageMaker hosting services, you can use the resulting model artifacts as part of the model. You can also use the artifacts with AWS IoT Greengrass. In that case, deploy them as an ML resource.

In the request body, you provide the following:

You can also provide a Tag to track the model compilation job's resource use and costs. The response body contains the CompilationJobArn for the compiled job.

To stop a model compilation job, use StopCompilationJob. To get information about a particular model compilation job, use DescribeCompilationJob. To get information about multiple model compilation jobs, use ListCompilationJobs.

", + "CreateCodeRepository": "

Creates a Git repository as a resource in your Amazon SageMaker account. You can associate the repository with notebook instances so that you can use Git source control for the notebooks you create. The Git repository is a resource in your Amazon SageMaker account, so it can be associated with more than one notebook instance, and it persists independently from the lifecycle of any notebook instances it is associated with.

The repository can be hosted either in Amazon Web Services CodeCommit or in any other Git repository.

", + "CreateCompilationJob": "

Starts a model compilation job. After the model has been compiled, Amazon SageMaker saves the resulting model artifacts to an Amazon Simple Storage Service (Amazon S3) bucket that you specify.

If you choose to host your model using Amazon SageMaker hosting services, you can use the resulting model artifacts as part of the model. You can also use the artifacts with Amazon Web Services IoT Greengrass. In that case, deploy them as an ML resource.

In the request body, you provide the following:

You can also provide a Tag to track the model compilation job's resource use and costs. The response body contains the CompilationJobArn for the compiled job.

To stop a model compilation job, use StopCompilationJob. To get information about a particular model compilation job, use DescribeCompilationJob. To get information about multiple model compilation jobs, use ListCompilationJobs.

", "CreateContext": "

Creates a context. A context is a lineage tracking entity that represents a logical grouping of other tracking or experiment entities. Some examples are an endpoint and a model package. For more information, see Amazon SageMaker ML Lineage Tracking.

CreateContext can only be invoked from within an SageMaker managed environment. This includes SageMaker training jobs, processing jobs, transform jobs, and SageMaker notebooks. A call to CreateContext from outside one of these environments results in an error.

", "CreateDataQualityJobDefinition": "

Creates a definition for a job that monitors data quality and drift. For information about model monitor, see Amazon SageMaker Model Monitor.

", "CreateDeviceFleet": "

Creates a device fleet.

", - "CreateDomain": "

Creates a Domain used by Amazon SageMaker Studio. A domain consists of an associated Amazon Elastic File System (EFS) volume, a list of authorized users, and a variety of security, application, policy, and Amazon Virtual Private Cloud (VPC) configurations. An AWS account is limited to one domain per region. Users within a domain can share notebook files and other artifacts with each other.

EFS storage

When a domain is created, an EFS volume is created for use by all of the users within the domain. Each user receives a private home directory within the EFS volume for notebooks, Git repositories, and data files.

SageMaker uses the AWS Key Management Service (AWS KMS) to encrypt the EFS volume attached to the domain with an AWS managed customer master key (CMK) by default. For more control, you can specify a customer managed CMK. For more information, see Protect Data at Rest Using Encryption.

VPC configuration

All SageMaker Studio traffic between the domain and the EFS volume is through the specified VPC and subnets. For other Studio traffic, you can specify the AppNetworkAccessType parameter. AppNetworkAccessType corresponds to the network access type that you choose when you onboard to Studio. The following options are available:

For more information, see Connect SageMaker Studio Notebooks to Resources in a VPC.

", + "CreateDomain": "

Creates a Domain used by Amazon SageMaker Studio. A domain consists of an associated Amazon Elastic File System (EFS) volume, a list of authorized users, and a variety of security, application, policy, and Amazon Virtual Private Cloud (VPC) configurations. An Amazon Web Services account is limited to one domain per region. Users within a domain can share notebook files and other artifacts with each other.

EFS storage

When a domain is created, an EFS volume is created for use by all of the users within the domain. Each user receives a private home directory within the EFS volume for notebooks, Git repositories, and data files.

SageMaker uses the Amazon Web Services Key Management Service (Amazon Web Services KMS) to encrypt the EFS volume attached to the domain with an Amazon Web Services managed customer master key (CMK) by default. For more control, you can specify a customer managed CMK. For more information, see Protect Data at Rest Using Encryption.

VPC configuration

All SageMaker Studio traffic between the domain and the EFS volume is through the specified VPC and subnets. For other Studio traffic, you can specify the AppNetworkAccessType parameter. AppNetworkAccessType corresponds to the network access type that you choose when you onboard to Studio. The following options are available:

NFS traffic over TCP on port 2049 needs to be allowed in both inbound and outbound rules in order to launch a SageMaker Studio app successfully.

For more information, see Connect SageMaker Studio Notebooks to Resources in a VPC.

", "CreateEdgePackagingJob": "

Starts a SageMaker Edge Manager model packaging job. Edge Manager will use the model artifacts from the Amazon Simple Storage Service bucket that you specify. After the model has been packaged, Amazon SageMaker saves the resulting artifacts to an S3 bucket that you specify.

", - "CreateEndpoint": "

Creates an endpoint using the endpoint configuration specified in the request. Amazon SageMaker uses the endpoint to provision resources and deploy models. You create the endpoint configuration with the CreateEndpointConfig API.

Use this API to deploy models using Amazon SageMaker hosting services.

For an example that calls this method when deploying a model to Amazon SageMaker hosting services, see Deploy the Model to Amazon SageMaker Hosting Services (AWS SDK for Python (Boto 3)).

You must not delete an EndpointConfig that is in use by an endpoint that is live or while the UpdateEndpoint or CreateEndpoint operations are being performed on the endpoint. To update an endpoint, you must create a new EndpointConfig.

The endpoint name must be unique within an AWS Region in your AWS account.

When it receives the request, Amazon SageMaker creates the endpoint, launches the resources (ML compute instances), and deploys the model(s) on them.

When you call CreateEndpoint, a load call is made to DynamoDB to verify that your endpoint configuration exists. When you read data from a DynamoDB table supporting Eventually Consistent Reads , the response might not reflect the results of a recently completed write operation. The response might include some stale data. If the dependent entities are not yet in DynamoDB, this causes a validation error. If you repeat your read request after a short time, the response should return the latest data. So retry logic is recommended to handle these possible issues. We also recommend that customers call DescribeEndpointConfig before calling CreateEndpoint to minimize the potential impact of a DynamoDB eventually consistent read.

When Amazon SageMaker receives the request, it sets the endpoint status to Creating. After it creates the endpoint, it sets the status to InService. Amazon SageMaker can then process incoming requests for inferences. To check the status of an endpoint, use the DescribeEndpoint API.

If any of the models hosted at this endpoint get model data from an Amazon S3 location, Amazon SageMaker uses AWS Security Token Service to download model artifacts from the S3 path you provided. AWS STS is activated in your IAM user account by default. If you previously deactivated AWS STS for a region, you need to reactivate AWS STS for that region. For more information, see Activating and Deactivating AWS STS in an AWS Region in the AWS Identity and Access Management User Guide.

To add the IAM role policies for using this API operation, go to the IAM console, and choose Roles in the left navigation pane. Search the IAM role that you want to grant access to use the CreateEndpoint and CreateEndpointConfig API operations, add the following policies to the role.

", - "CreateEndpointConfig": "

Creates an endpoint configuration that Amazon SageMaker hosting services uses to deploy models. In the configuration, you identify one or more models, created using the CreateModel API, to deploy and the resources that you want Amazon SageMaker to provision. Then you call the CreateEndpoint API.

Use this API if you want to use Amazon SageMaker hosting services to deploy models into production.

In the request, you define a ProductionVariant, for each model that you want to deploy. Each ProductionVariant parameter also describes the resources that you want Amazon SageMaker to provision. This includes the number and type of ML compute instances to deploy.

If you are hosting multiple models, you also assign a VariantWeight to specify how much traffic you want to allocate to each model. For example, suppose that you want to host two models, A and B, and you assign traffic weight 2 for model A and 1 for model B. Amazon SageMaker distributes two-thirds of the traffic to Model A, and one-third to model B.

For an example that calls this method when deploying a model to Amazon SageMaker hosting services, see Deploy the Model to Amazon SageMaker Hosting Services (AWS SDK for Python (Boto 3)).

When you call CreateEndpoint, a load call is made to DynamoDB to verify that your endpoint configuration exists. When you read data from a DynamoDB table supporting Eventually Consistent Reads , the response might not reflect the results of a recently completed write operation. The response might include some stale data. If the dependent entities are not yet in DynamoDB, this causes a validation error. If you repeat your read request after a short time, the response should return the latest data. So retry logic is recommended to handle these possible issues. We also recommend that customers call DescribeEndpointConfig before calling CreateEndpoint to minimize the potential impact of a DynamoDB eventually consistent read.

", - "CreateExperiment": "

Creates an SageMaker experiment. An experiment is a collection of trials that are observed, compared and evaluated as a group. A trial is a set of steps, called trial components, that produce a machine learning model.

The goal of an experiment is to determine the components that produce the best model. Multiple trials are performed, each one isolating and measuring the impact of a change to one or more inputs, while keeping the remaining inputs constant.

When you use SageMaker Studio or the SageMaker Python SDK, all experiments, trials, and trial components are automatically tracked, logged, and indexed. When you use the AWS SDK for Python (Boto), you must use the logging APIs provided by the SDK.

You can add tags to experiments, trials, trial components and then use the Search API to search for the tags.

To add a description to an experiment, specify the optional Description parameter. To add a description later, or to change the description, call the UpdateExperiment API.

To get a list of all your experiments, call the ListExperiments API. To view an experiment's properties, call the DescribeExperiment API. To get a list of all the trials associated with an experiment, call the ListTrials API. To create a trial call the CreateTrial API.

", - "CreateFeatureGroup": "

Create a new FeatureGroup. A FeatureGroup is a group of Features defined in the FeatureStore to describe a Record.

The FeatureGroup defines the schema and features contained in the FeatureGroup. A FeatureGroup definition is composed of a list of Features, a RecordIdentifierFeatureName, an EventTimeFeatureName and configurations for its OnlineStore and OfflineStore. Check AWS service quotas to see the FeatureGroups quota for your AWS account.

You must include at least one of OnlineStoreConfig and OfflineStoreConfig to create a FeatureGroup.

", + "CreateEndpoint": "

Creates an endpoint using the endpoint configuration specified in the request. Amazon SageMaker uses the endpoint to provision resources and deploy models. You create the endpoint configuration with the CreateEndpointConfig API.

Use this API to deploy models using Amazon SageMaker hosting services.

For an example that calls this method when deploying a model to Amazon SageMaker hosting services, see Deploy the Model to Amazon SageMaker Hosting Services (Amazon Web Services SDK for Python (Boto 3)).

You must not delete an EndpointConfig that is in use by an endpoint that is live or while the UpdateEndpoint or CreateEndpoint operations are being performed on the endpoint. To update an endpoint, you must create a new EndpointConfig.

The endpoint name must be unique within an Amazon Web Services Region in your Amazon Web Services account.

When it receives the request, Amazon SageMaker creates the endpoint, launches the resources (ML compute instances), and deploys the model(s) on them.

When you call CreateEndpoint, a load call is made to DynamoDB to verify that your endpoint configuration exists. When you read data from a DynamoDB table supporting Eventually Consistent Reads , the response might not reflect the results of a recently completed write operation. The response might include some stale data. If the dependent entities are not yet in DynamoDB, this causes a validation error. If you repeat your read request after a short time, the response should return the latest data. So retry logic is recommended to handle these possible issues. We also recommend that customers call DescribeEndpointConfig before calling CreateEndpoint to minimize the potential impact of a DynamoDB eventually consistent read.

When Amazon SageMaker receives the request, it sets the endpoint status to Creating. After it creates the endpoint, it sets the status to InService. Amazon SageMaker can then process incoming requests for inferences. To check the status of an endpoint, use the DescribeEndpoint API.

If any of the models hosted at this endpoint get model data from an Amazon S3 location, Amazon SageMaker uses Amazon Web Services Security Token Service to download model artifacts from the S3 path you provided. Amazon Web Services STS is activated in your IAM user account by default. If you previously deactivated Amazon Web Services STS for a region, you need to reactivate Amazon Web Services STS for that region. For more information, see Activating and Deactivating Amazon Web Services STS in an Amazon Web Services Region in the Amazon Web Services Identity and Access Management User Guide.

To add the IAM role policies for using this API operation, go to the IAM console, and choose Roles in the left navigation pane. Search the IAM role that you want to grant access to use the CreateEndpoint and CreateEndpointConfig API operations, add the following policies to the role.

", + "CreateEndpointConfig": "

Creates an endpoint configuration that Amazon SageMaker hosting services uses to deploy models. In the configuration, you identify one or more models, created using the CreateModel API, to deploy and the resources that you want Amazon SageMaker to provision. Then you call the CreateEndpoint API.

Use this API if you want to use Amazon SageMaker hosting services to deploy models into production.

In the request, you define a ProductionVariant, for each model that you want to deploy. Each ProductionVariant parameter also describes the resources that you want Amazon SageMaker to provision. This includes the number and type of ML compute instances to deploy.

If you are hosting multiple models, you also assign a VariantWeight to specify how much traffic you want to allocate to each model. For example, suppose that you want to host two models, A and B, and you assign traffic weight 2 for model A and 1 for model B. Amazon SageMaker distributes two-thirds of the traffic to Model A, and one-third to model B.

For an example that calls this method when deploying a model to Amazon SageMaker hosting services, see Deploy the Model to Amazon SageMaker Hosting Services (Amazon Web Services SDK for Python (Boto 3)).

When you call CreateEndpoint, a load call is made to DynamoDB to verify that your endpoint configuration exists. When you read data from a DynamoDB table supporting Eventually Consistent Reads , the response might not reflect the results of a recently completed write operation. The response might include some stale data. If the dependent entities are not yet in DynamoDB, this causes a validation error. If you repeat your read request after a short time, the response should return the latest data. So retry logic is recommended to handle these possible issues. We also recommend that customers call DescribeEndpointConfig before calling CreateEndpoint to minimize the potential impact of a DynamoDB eventually consistent read.

", + "CreateExperiment": "

Creates a SageMaker experiment. An experiment is a collection of trials that are observed, compared and evaluated as a group. A trial is a set of steps, called trial components, that produce a machine learning model.

The goal of an experiment is to determine the components that produce the best model. Multiple trials are performed, each one isolating and measuring the impact of a change to one or more inputs, while keeping the remaining inputs constant.

When you use SageMaker Studio or the SageMaker Python SDK, all experiments, trials, and trial components are automatically tracked, logged, and indexed. When you use the Amazon Web Services SDK for Python (Boto), you must use the logging APIs provided by the SDK.

You can add tags to experiments, trials, trial components and then use the Search API to search for the tags.

To add a description to an experiment, specify the optional Description parameter. To add a description later, or to change the description, call the UpdateExperiment API.

To get a list of all your experiments, call the ListExperiments API. To view an experiment's properties, call the DescribeExperiment API. To get a list of all the trials associated with an experiment, call the ListTrials API. To create a trial call the CreateTrial API.

", + "CreateFeatureGroup": "

Create a new FeatureGroup. A FeatureGroup is a group of Features defined in the FeatureStore to describe a Record.

The FeatureGroup defines the schema and features contained in the FeatureGroup. A FeatureGroup definition is composed of a list of Features, a RecordIdentifierFeatureName, an EventTimeFeatureName and configurations for its OnlineStore and OfflineStore. Check Amazon Web Services service quotas to see the FeatureGroups quota for your Amazon Web Services account.

You must include at least one of OnlineStoreConfig and OfflineStoreConfig to create a FeatureGroup.

", "CreateFlowDefinition": "

Creates a flow definition.

", "CreateHumanTaskUi": "

Defines the settings you will use for the human review workflow user interface. Reviewers will see a three-panel interface with an instruction area, the item to review, and an input area.

", "CreateHyperParameterTuningJob": "

Starts a hyperparameter tuning job. A hyperparameter tuning job finds the best version of a model by running many training jobs on your dataset using the algorithm you choose and values for hyperparameters within ranges that you specify. It then chooses the hyperparameter values that result in a model that performs the best, as measured by an objective metric that you choose.

", "CreateImage": "

Creates a custom SageMaker image. A SageMaker image is a set of image versions. Each image version represents a container image stored in Amazon Container Registry (ECR). For more information, see Bring your own SageMaker image.

", "CreateImageVersion": "

Creates a version of the SageMaker image specified by ImageName. The version represents the Amazon Container Registry (ECR) container image specified by BaseImage.

", - "CreateLabelingJob": "

Creates a job that uses workers to label the data objects in your input dataset. You can use the labeled data to train machine learning models.

You can select your workforce from one of three providers:

You can also use automated data labeling to reduce the number of data objects that need to be labeled by a human. Automated data labeling uses active learning to determine if a data object can be labeled by machine or if it needs to be sent to a human worker. For more information, see Using Automated Data Labeling.

The data objects to be labeled are contained in an Amazon S3 bucket. You create a manifest file that describes the location of each object. For more information, see Using Input and Output Data.

The output can be used as the manifest file for another labeling job or as training data for your machine learning models.

You can use this operation to create a static labeling job or a streaming labeling job. A static labeling job stops if all data objects in the input manifest file identified in ManifestS3Uri have been labeled. A streaming labeling job runs perpetually until it is manually stopped, or remains idle for 10 days. You can send new data objects to an active (InProgress) streaming labeling job in real time. To learn how to create a static labeling job, see Create a Labeling Job (API) in the Amazon SageMaker Developer Guide. To learn how to create a streaming labeling job, see Create a Streaming Labeling Job.

", - "CreateModel": "

Creates a model in Amazon SageMaker. In the request, you name the model and describe a primary container. For the primary container, you specify the Docker image that contains inference code, artifacts (from prior training), and a custom environment map that the inference code uses when you deploy the model for predictions.

Use this API to create a model if you want to use Amazon SageMaker hosting services or run a batch transform job.

To host your model, you create an endpoint configuration with the CreateEndpointConfig API, and then create an endpoint with the CreateEndpoint API. Amazon SageMaker then deploys all of the containers that you defined for the model in the hosting environment.

For an example that calls this method when deploying a model to Amazon SageMaker hosting services, see Deploy the Model to Amazon SageMaker Hosting Services (AWS SDK for Python (Boto 3)).

To run a batch transform using your model, you start a job with the CreateTransformJob API. Amazon SageMaker uses your model and your dataset to get inferences which are then saved to a specified S3 location.

In the CreateModel request, you must define a container with the PrimaryContainer parameter.

In the request, you also provide an IAM role that Amazon SageMaker can assume to access model artifacts and docker image for deployment on ML compute hosting instances or for batch transform jobs. In addition, you also use the IAM role to manage permissions the inference code needs. For example, if the inference code accesses any other AWS resources, you grant necessary permissions via this role.

", + "CreateLabelingJob": "

Creates a job that uses workers to label the data objects in your input dataset. You can use the labeled data to train machine learning models.

You can select your workforce from one of three providers:

You can also use automated data labeling to reduce the number of data objects that need to be labeled by a human. Automated data labeling uses active learning to determine if a data object can be labeled by machine or if it needs to be sent to a human worker. For more information, see Using Automated Data Labeling.

The data objects to be labeled are contained in an Amazon S3 bucket. You create a manifest file that describes the location of each object. For more information, see Using Input and Output Data.

The output can be used as the manifest file for another labeling job or as training data for your machine learning models.

You can use this operation to create a static labeling job or a streaming labeling job. A static labeling job stops if all data objects in the input manifest file identified in ManifestS3Uri have been labeled. A streaming labeling job runs perpetually until it is manually stopped, or remains idle for 10 days. You can send new data objects to an active (InProgress) streaming labeling job in real time. To learn how to create a static labeling job, see Create a Labeling Job (API) in the Amazon SageMaker Developer Guide. To learn how to create a streaming labeling job, see Create a Streaming Labeling Job.

", + "CreateModel": "

Creates a model in Amazon SageMaker. In the request, you name the model and describe a primary container. For the primary container, you specify the Docker image that contains inference code, artifacts (from prior training), and a custom environment map that the inference code uses when you deploy the model for predictions.

Use this API to create a model if you want to use Amazon SageMaker hosting services or run a batch transform job.

To host your model, you create an endpoint configuration with the CreateEndpointConfig API, and then create an endpoint with the CreateEndpoint API. Amazon SageMaker then deploys all of the containers that you defined for the model in the hosting environment.

For an example that calls this method when deploying a model to Amazon SageMaker hosting services, see Deploy the Model to Amazon SageMaker Hosting Services (Amazon Web Services SDK for Python (Boto 3)).

To run a batch transform using your model, you start a job with the CreateTransformJob API. Amazon SageMaker uses your model and your dataset to get inferences which are then saved to a specified S3 location.

In the CreateModel request, you must define a container with the PrimaryContainer parameter.

In the request, you also provide an IAM role that Amazon SageMaker can assume to access model artifacts and docker image for deployment on ML compute hosting instances or for batch transform jobs. In addition, you also use the IAM role to manage permissions the inference code needs. For example, if the inference code accesses any other Amazon Web Services resources, you grant necessary permissions via this role.

", "CreateModelBiasJobDefinition": "

Creates the definition for a model bias job.

", "CreateModelExplainabilityJobDefinition": "

Creates the definition for a model explainability job.

", - "CreateModelPackage": "

Creates a model package that you can use to create Amazon SageMaker models or list on AWS Marketplace, or a versioned model that is part of a model group. Buyers can subscribe to model packages listed on AWS Marketplace to create models in Amazon SageMaker.

To create a model package by specifying a Docker container that contains your inference code and the Amazon S3 location of your model artifacts, provide values for InferenceSpecification. To create a model from an algorithm resource that you created or subscribed to in AWS Marketplace, provide a value for SourceAlgorithmSpecification.

There are two types of model packages:

", + "CreateModelPackage": "

Creates a model package that you can use to create Amazon SageMaker models or list on Amazon Web Services Marketplace, or a versioned model that is part of a model group. Buyers can subscribe to model packages listed on Amazon Web Services Marketplace to create models in Amazon SageMaker.

To create a model package by specifying a Docker container that contains your inference code and the Amazon S3 location of your model artifacts, provide values for InferenceSpecification. To create a model from an algorithm resource that you created or subscribed to in Amazon Web Services Marketplace, provide a value for SourceAlgorithmSpecification.

There are two types of model packages:

", "CreateModelPackageGroup": "

Creates a model group. A model group contains a group of model versions.

", "CreateModelQualityJobDefinition": "

Creates a definition for a job that monitors model quality and drift. For information about model monitor, see Amazon SageMaker Model Monitor.

", "CreateMonitoringSchedule": "

Creates a schedule that regularly starts Amazon SageMaker Processing Jobs to monitor the data captured for an Amazon SageMaker Endpoint.

", "CreateNotebookInstance": "

Creates an Amazon SageMaker notebook instance. A notebook instance is a machine learning (ML) compute instance running on a Jupyter notebook.

In a CreateNotebookInstance request, specify the type of ML compute instance that you want to run. Amazon SageMaker launches the instance, installs common libraries that you can use to explore datasets for model training, and attaches an ML storage volume to the notebook instance.

Amazon SageMaker also provides a set of example notebooks. Each notebook demonstrates how to use Amazon SageMaker with a specific algorithm or with a machine learning framework.

After receiving the request, Amazon SageMaker does the following:

  1. Creates a network interface in the Amazon SageMaker VPC.

  2. (Option) If you specified SubnetId, Amazon SageMaker creates a network interface in your own VPC, which is inferred from the subnet ID that you provide in the input. When creating this network interface, Amazon SageMaker attaches the security group that you specified in the request to the network interface that it creates in your VPC.

  3. Launches an EC2 instance of the type specified in the request in the Amazon SageMaker VPC. If you specified SubnetId of your VPC, Amazon SageMaker specifies both network interfaces when launching this instance. This enables inbound traffic from your own VPC to the notebook instance, assuming that the security groups allow it.

After creating the notebook instance, Amazon SageMaker returns its Amazon Resource Name (ARN). You can't change the name of a notebook instance after you create it.

After Amazon SageMaker creates the notebook instance, you can connect to the Jupyter server and work in Jupyter notebooks. For example, you can write code to explore a dataset that you can use for model training, train a model, host models by creating Amazon SageMaker endpoints, and validate hosted models.

For more information, see How It Works.

", "CreateNotebookInstanceLifecycleConfig": "

Creates a lifecycle configuration that you can associate with a notebook instance. A lifecycle configuration is a collection of shell scripts that run when you create or start a notebook instance.

Each lifecycle configuration script has a limit of 16384 characters.

The value of the $PATH environment variable that is available to both scripts is /sbin:/bin:/usr/sbin:/usr/bin.

View CloudWatch Logs for notebook instance lifecycle configurations in log group /aws/sagemaker/NotebookInstances in log stream [notebook-instance-name]/[LifecycleConfigHook].

Lifecycle configuration scripts cannot run for longer than 5 minutes. If a script runs for longer than 5 minutes, it fails and the notebook instance is not created or started.

For information about notebook instance lifecycle configurations, see Step 2.1: (Optional) Customize a Notebook Instance.

", "CreatePipeline": "

Creates a pipeline using a JSON pipeline definition.

", - "CreatePresignedDomainUrl": "

Creates a URL for a specified UserProfile in a Domain. When accessed in a web browser, the user will be automatically signed in to Amazon SageMaker Studio, and granted access to all of the Apps and files associated with the Domain's Amazon Elastic File System (EFS) volume. This operation can only be called when the authentication mode equals IAM.

The URL that you get from a call to CreatePresignedDomainUrl has a default timeout of 5 minutes. You can configure this value using ExpiresInSeconds. If you try to use the URL after the timeout limit expires, you are directed to the AWS console sign-in page.

", - "CreatePresignedNotebookInstanceUrl": "

Returns a URL that you can use to connect to the Jupyter server from a notebook instance. In the Amazon SageMaker console, when you choose Open next to a notebook instance, Amazon SageMaker opens a new tab showing the Jupyter server home page from the notebook instance. The console uses this API to get the URL and show the page.

The IAM role or user used to call this API defines the permissions to access the notebook instance. Once the presigned URL is created, no additional permission is required to access this URL. IAM authorization policies for this API are also enforced for every HTTP request and WebSocket frame that attempts to connect to the notebook instance.

You can restrict access to this API and to the URL that it returns to a list of IP addresses that you specify. Use the NotIpAddress condition operator and the aws:SourceIP condition context key to specify the list of IP addresses that you want to have access to the notebook instance. For more information, see Limit Access to a Notebook Instance by IP Address.

The URL that you get from a call to CreatePresignedNotebookInstanceUrl is valid only for 5 minutes. If you try to use the URL after the 5-minute limit expires, you are directed to the AWS console sign-in page.

", + "CreatePresignedDomainUrl": "

Creates a URL for a specified UserProfile in a Domain. When accessed in a web browser, the user will be automatically signed in to Amazon SageMaker Studio, and granted access to all of the Apps and files associated with the Domain's Amazon Elastic File System (EFS) volume. This operation can only be called when the authentication mode equals IAM.

The IAM role or user used to call this API defines the permissions to access the app. Once the presigned URL is created, no additional permission is required to access this URL. IAM authorization policies for this API are also enforced for every HTTP request and WebSocket frame that attempts to connect to the app.

You can restrict access to this API and to the URL that it returns to a list of IP addresses, Amazon VPCs or Amazon VPC Endpoints that you specify. For more information, see Connect to SageMaker Studio Through an Interface VPC Endpoint .

The URL that you get from a call to CreatePresignedDomainUrl has a default timeout of 5 minutes. You can configure this value using ExpiresInSeconds. If you try to use the URL after the timeout limit expires, you are directed to the Amazon Web Services console sign-in page.

", + "CreatePresignedNotebookInstanceUrl": "

Returns a URL that you can use to connect to the Jupyter server from a notebook instance. In the Amazon SageMaker console, when you choose Open next to a notebook instance, Amazon SageMaker opens a new tab showing the Jupyter server home page from the notebook instance. The console uses this API to get the URL and show the page.

The IAM role or user used to call this API defines the permissions to access the notebook instance. Once the presigned URL is created, no additional permission is required to access this URL. IAM authorization policies for this API are also enforced for every HTTP request and WebSocket frame that attempts to connect to the notebook instance.

You can restrict access to this API and to the URL that it returns to a list of IP addresses that you specify. Use the NotIpAddress condition operator and the aws:SourceIP condition context key to specify the list of IP addresses that you want to have access to the notebook instance. For more information, see Limit Access to a Notebook Instance by IP Address.

The URL that you get from a call to CreatePresignedNotebookInstanceUrl is valid only for 5 minutes. If you try to use the URL after the 5-minute limit expires, you are directed to the Amazon Web Services console sign-in page.

", "CreateProcessingJob": "

Creates a processing job.

", "CreateProject": "

Creates a machine learning (ML) project that can contain one or more templates that set up an ML pipeline from training to deploying an approved model.

", "CreateTrainingJob": "

Starts a model training job. After training completes, Amazon SageMaker saves the resulting model artifacts to an Amazon S3 location that you specify.

If you choose to host your model using Amazon SageMaker hosting services, you can use the resulting model artifacts as part of the model. You can also use the artifacts in a machine learning service other than Amazon SageMaker, provided that you know how to use them for inference.

In the request body, you provide the following:

For more information about Amazon SageMaker, see How It Works.

", - "CreateTransformJob": "

Starts a transform job. A transform job uses a trained model to get inferences on a dataset and saves these results to an Amazon S3 location that you specify.

To perform batch transformations, you create a transform job and use the data that you have readily available.

In the request body, you provide the following:

For more information about how batch transformation works, see Batch Transform.

", - "CreateTrial": "

Creates a SageMaker trial. A trial is a set of steps called trial components that produce a machine learning model. A trial is part of a single SageMaker experiment.

When you use SageMaker Studio or the SageMaker Python SDK, all experiments, trials, and trial components are automatically tracked, logged, and indexed. When you use the AWS SDK for Python (Boto), you must use the logging APIs provided by the SDK.

You can add tags to a trial and then use the Search API to search for the tags.

To get a list of all your trials, call the ListTrials API. To view a trial's properties, call the DescribeTrial API. To create a trial component, call the CreateTrialComponent API.

", - "CreateTrialComponent": "

Creates a trial component, which is a stage of a machine learning trial. A trial is composed of one or more trial components. A trial component can be used in multiple trials.

Trial components include pre-processing jobs, training jobs, and batch transform jobs.

When you use SageMaker Studio or the SageMaker Python SDK, all experiments, trials, and trial components are automatically tracked, logged, and indexed. When you use the AWS SDK for Python (Boto), you must use the logging APIs provided by the SDK.

You can add tags to a trial component and then use the Search API to search for the tags.

CreateTrialComponent can only be invoked from within a SageMaker managed environment. This includes SageMaker training jobs, processing jobs, transform jobs, and SageMaker notebooks. A call to CreateTrialComponent from outside one of these environments results in an error.

", + "CreateTransformJob": "

Starts a transform job. A transform job uses a trained model to get inferences on a dataset and saves these results to an Amazon S3 location that you specify.

To perform batch transformations, you create a transform job and use the data that you have readily available.

In the request body, you provide the following:

For more information about how batch transformation works, see Batch Transform.

", + "CreateTrial": "

Creates a SageMaker trial. A trial is a set of steps called trial components that produce a machine learning model. A trial is part of a single SageMaker experiment.

When you use SageMaker Studio or the SageMaker Python SDK, all experiments, trials, and trial components are automatically tracked, logged, and indexed. When you use the Amazon Web Services SDK for Python (Boto), you must use the logging APIs provided by the SDK.

You can add tags to a trial and then use the Search API to search for the tags.

To get a list of all your trials, call the ListTrials API. To view a trial's properties, call the DescribeTrial API. To create a trial component, call the CreateTrialComponent API.

", + "CreateTrialComponent": "

Creates a trial component, which is a stage of a machine learning trial. A trial is composed of one or more trial components. A trial component can be used in multiple trials.

Trial components include pre-processing jobs, training jobs, and batch transform jobs.

When you use SageMaker Studio or the SageMaker Python SDK, all experiments, trials, and trial components are automatically tracked, logged, and indexed. When you use the Amazon Web Services SDK for Python (Boto), you must use the logging APIs provided by the SDK.

You can add tags to a trial component and then use the Search API to search for the tags.

", "CreateUserProfile": "

Creates a user profile. A user profile represents a single user within a domain, and is the main way to reference a \"person\" for the purposes of sharing, reporting, and other user-oriented features. This entity is created when a user onboards to Amazon SageMaker Studio. If an administrator invites a person by email or imports them from SSO, a user profile is automatically created. A user profile is the primary holder of settings for an individual user and has a reference to the user's private Amazon Elastic File System (EFS) home directory.

", - "CreateWorkforce": "

Use this operation to create a workforce. This operation will return an error if a workforce already exists in the AWS Region that you specify. You can only create one workforce in each AWS Region per AWS account.

If you want to create a new workforce in an AWS Region where a workforce already exists, use the API operation to delete the existing workforce and then use CreateWorkforce to create a new workforce.

To create a private workforce using Amazon Cognito, you must specify a Cognito user pool in CognitoConfig. You can also create an Amazon Cognito workforce using the Amazon SageMaker console. For more information, see Create a Private Workforce (Amazon Cognito).

To create a private workforce using your own OIDC Identity Provider (IdP), specify your IdP configuration in OidcConfig. Your OIDC IdP must support groups because groups are used by Ground Truth and Amazon A2I to create work teams. For more information, see Create a Private Workforce (OIDC IdP).

", + "CreateWorkforce": "

Use this operation to create a workforce. This operation will return an error if a workforce already exists in the Amazon Web Services Region that you specify. You can only create one workforce in each Amazon Web Services Region per Amazon Web Services account.

If you want to create a new workforce in an Amazon Web Services Region where a workforce already exists, use the API operation to delete the existing workforce and then use CreateWorkforce to create a new workforce.

To create a private workforce using Amazon Cognito, you must specify a Cognito user pool in CognitoConfig. You can also create an Amazon Cognito workforce using the Amazon SageMaker console. For more information, see Create a Private Workforce (Amazon Cognito).

To create a private workforce using your own OIDC Identity Provider (IdP), specify your IdP configuration in OidcConfig. Your OIDC IdP must support groups because groups are used by Ground Truth and Amazon A2I to create work teams. For more information, see Create a Private Workforce (OIDC IdP).

", "CreateWorkteam": "

Creates a new work team for labeling your data. A work team is defined by one or more Amazon Cognito user pools. You must first create the user pools before you can create a work team.

You cannot create more than 25 work teams in an account and region.

", "DeleteAction": "

Deletes an action.

", "DeleteAlgorithm": "

Removes the specified algorithm from your account.

", @@ -63,15 +63,15 @@ "DeleteEndpoint": "

Deletes an endpoint. Amazon SageMaker frees up all of the resources that were deployed when the endpoint was created.

Amazon SageMaker retires any custom KMS key grants associated with the endpoint, meaning you don't need to use the RevokeGrant API call.

", "DeleteEndpointConfig": "

Deletes an endpoint configuration. The DeleteEndpointConfig API deletes only the specified configuration. It does not delete endpoints created using the configuration.

You must not delete an EndpointConfig in use by an endpoint that is live or while the UpdateEndpoint or CreateEndpoint operations are being performed on the endpoint. If you delete the EndpointConfig of an endpoint that is active or being created or updated you may lose visibility into the instance type the endpoint is using. The endpoint must be deleted in order to stop incurring charges.

", "DeleteExperiment": "

Deletes an SageMaker experiment. All trials associated with the experiment must be deleted first. Use the ListTrials API to get a list of the trials associated with the experiment.

", - "DeleteFeatureGroup": "

Delete the FeatureGroup and any data that was written to the OnlineStore of the FeatureGroup. Data cannot be accessed from the OnlineStore immediately after DeleteFeatureGroup is called.

Data written into the OfflineStore will not be deleted. The AWS Glue database and tables that are automatically created for your OfflineStore are not deleted.

", + "DeleteFeatureGroup": "

Delete the FeatureGroup and any data that was written to the OnlineStore of the FeatureGroup. Data cannot be accessed from the OnlineStore immediately after DeleteFeatureGroup is called.

Data written into the OfflineStore will not be deleted. The Amazon Web Services Glue database and tables that are automatically created for your OfflineStore are not deleted.

", "DeleteFlowDefinition": "

Deletes the specified flow definition.

", "DeleteHumanTaskUi": "

Use this operation to delete a human task user interface (worker task template).

To see a list of human task user interfaces (work task templates) in your account, use . When you delete a worker task template, it no longer appears when you call ListHumanTaskUis.

", "DeleteImage": "

Deletes a SageMaker image and all versions of the image. The container images aren't deleted.

", "DeleteImageVersion": "

Deletes a version of a SageMaker image. The container image the version represents isn't deleted.

", - "DeleteModel": "

Deletes a model. The DeleteModel API deletes only the model entry that was created in Amazon SageMaker when you called the CreateModel API. It does not delete model artifacts, inference code, or the IAM role that you specified when creating the model.

", + "DeleteModel": "

Deletes a model. The DeleteModel API deletes only the model entry that was created in Amazon SageMaker when you called the CreateModel API. It does not delete model artifacts, inference code, or the IAM role that you specified when creating the model.

", "DeleteModelBiasJobDefinition": "

Deletes an Amazon SageMaker model bias job definition.

", "DeleteModelExplainabilityJobDefinition": "

Deletes an Amazon SageMaker model explainability job definition.

", - "DeleteModelPackage": "

Deletes a model package.

A model package is used to create Amazon SageMaker models or list on AWS Marketplace. Buyers can subscribe to model packages listed on AWS Marketplace to create models in Amazon SageMaker.

", + "DeleteModelPackage": "

Deletes a model package.

A model package is used to create Amazon SageMaker models or list on Amazon Web Services Marketplace. Buyers can subscribe to model packages listed on Amazon Web Services Marketplace to create models in Amazon SageMaker.

", "DeleteModelPackageGroup": "

Deletes the specified model group.

", "DeleteModelPackageGroupPolicy": "

Deletes a model group resource policy.

", "DeleteModelQualityJobDefinition": "

Deletes the specified model quality monitoring job definition.

", @@ -84,7 +84,7 @@ "DeleteTrial": "

Deletes the specified trial. All trial components that make up the trial must be deleted first. Use the DescribeTrialComponent API to get the list of trial components.

", "DeleteTrialComponent": "

Deletes the specified trial component. A trial component must be disassociated from all trials before the trial component can be deleted. To disassociate a trial component from a trial, call the DisassociateTrialComponent API.

", "DeleteUserProfile": "

Deletes a user profile. When a user profile is deleted, the user loses access to their EFS volume, including data, notebooks, and other artifacts.

", - "DeleteWorkforce": "

Use this operation to delete a workforce.

If you want to create a new workforce in an AWS Region where a workforce already exists, use this operation to delete the existing workforce and then use to create a new workforce.

If a private workforce contains one or more work teams, you must use the operation to delete all work teams before you delete the workforce. If you try to delete a workforce that contains one or more work teams, you will recieve a ResourceInUse error.

", + "DeleteWorkforce": "

Use this operation to delete a workforce.

If you want to create a new workforce in an Amazon Web Services Region where a workforce already exists, use this operation to delete the existing workforce and then use to create a new workforce.

If a private workforce contains one or more work teams, you must use the operation to delete all work teams before you delete the workforce. If you try to delete a workforce that contains one or more work teams, you will receive a ResourceInUse error.

", "DeleteWorkteam": "

Deletes an existing work team. This operation can't be undone.

", "DeregisterDevices": "

Deregisters the specified devices. After you deregister a device, you will need to re-register the devices.

", "DescribeAction": "

Describes an action.

", @@ -114,7 +114,7 @@ "DescribeModel": "

Describes a model that you created using the CreateModel API.

", "DescribeModelBiasJobDefinition": "

Returns a description of a model bias job definition.

", "DescribeModelExplainabilityJobDefinition": "

Returns a description of a model explainability job definition.

", - "DescribeModelPackage": "

Returns a description of the specified model package, which is used to create Amazon SageMaker models or list them on AWS Marketplace.

To create models in Amazon SageMaker, buyers can subscribe to model packages listed on AWS Marketplace.

", + "DescribeModelPackage": "

Returns a description of the specified model package, which is used to create Amazon SageMaker models or list them on Amazon Web Services Marketplace.

To create models in Amazon SageMaker, buyers can subscribe to model packages listed on Amazon Web Services Marketplace.

", "DescribeModelPackageGroup": "

Gets a description for the specified model group.

", "DescribeModelQualityJobDefinition": "

Returns a description of a model quality job definition.

", "DescribeMonitoringSchedule": "

Describes the schedule for a monitoring job.

", @@ -125,7 +125,7 @@ "DescribePipelineExecution": "

Describes the details of a pipeline execution.

", "DescribeProcessingJob": "

Returns a description of a processing job.

", "DescribeProject": "

Describes the details of a project.

", - "DescribeSubscribedWorkteam": "

Gets information about a work team provided by a vendor. It returns details about the subscription with a vendor in the AWS Marketplace.

", + "DescribeSubscribedWorkteam": "

Gets information about a work team provided by a vendor. It returns details about the subscription with a vendor in the Amazon Web Services Marketplace.

", "DescribeTrainingJob": "

Returns information about a training job.

Some of the attributes below only appear if the training job successfully starts. If the training job fails, TrainingJobStatus is Failed and, depending on the FailureReason, attributes like TrainingStartTime, TrainingTimeInSeconds, TrainingEndTime, and BillableTimeInSeconds may not be present in the response.

", "DescribeTransformJob": "

Returns information about a transform job.

", "DescribeTrial": "

Provides a list of a trial's properties.

", @@ -137,7 +137,7 @@ "DisassociateTrialComponent": "

Disassociates a trial component from a trial. This doesn't affect other trials the component is associated with. Before you can delete a component, you must disassociate the component from all trials it is associated with. To associate a trial component with a trial, call the AssociateTrialComponent API.

To get a list of the trials a component is associated with, use the Search API. Specify ExperimentTrialComponent for the Resource parameter. The list appears in the response under Results.TrialComponent.Parents.

", "EnableSagemakerServicecatalogPortfolio": "

Enables using Service Catalog in SageMaker. Service Catalog is used to create SageMaker projects.

", "GetDeviceFleetReport": "

Describes a fleet.

", - "GetModelPackageGroupPolicy": "

Gets a resource policy that manages access for a model group. For information about resource policies, see Identity-based policies and resource-based policies in the AWS Identity and Access Management User Guide..

", + "GetModelPackageGroupPolicy": "

Gets a resource policy that manages access for a model group. For information about resource policies, see Identity-based policies and resource-based policies in the Amazon Web Services Identity and Access Management User Guide.

", "GetSagemakerServicecatalogPortfolioStatus": "

Gets the status of Service Catalog in SageMaker. Service Catalog is used to create SageMaker projects.

", "GetSearchSuggestions": "

An auto-complete API for the search functionality in the Amazon SageMaker console. It returns suggestions of possible matches for the property name to use in Search queries. Provides suggestions for HyperParameters, Tags, and Metrics.

", "ListActions": "

Lists the actions in your account and their properties.

", @@ -169,31 +169,31 @@ "ListLabelingJobsForWorkteam": "

Gets a list of labeling jobs assigned to a specified work team.

", "ListModelBiasJobDefinitions": "

Lists model bias jobs definitions that satisfy various filters.

", "ListModelExplainabilityJobDefinitions": "

Lists model explainability job definitions that satisfy various filters.

", - "ListModelPackageGroups": "

Gets a list of the model groups in your AWS account.

", + "ListModelPackageGroups": "

Gets a list of the model groups in your Amazon Web Services account.

", "ListModelPackages": "

Lists the model packages that have been created.

", "ListModelQualityJobDefinitions": "

Gets a list of model quality monitoring job definitions in your account.

", - "ListModels": "

Lists models created with the CreateModel API.

", + "ListModels": "

Lists models created with the CreateModel API.

", "ListMonitoringExecutions": "

Returns list of all monitoring job executions.

", "ListMonitoringSchedules": "

Returns list of all monitoring schedules.

", "ListNotebookInstanceLifecycleConfigs": "

Lists notebook instance lifecycle configurations created with the CreateNotebookInstanceLifecycleConfig API.

", - "ListNotebookInstances": "

Returns a list of the Amazon SageMaker notebook instances in the requester's account in an AWS Region.

", + "ListNotebookInstances": "

Returns a list of the Amazon SageMaker notebook instances in the requester's account in an Amazon Web Services Region.

", "ListPipelineExecutionSteps": "

Gets a list of PipeLineExecutionStep objects.

", "ListPipelineExecutions": "

Gets a list of the pipeline executions.

", "ListPipelineParametersForExecution": "

Gets a list of parameters for a pipeline execution.

", "ListPipelines": "

Gets a list of pipelines.

", "ListProcessingJobs": "

Lists processing jobs that satisfy various filters.

", - "ListProjects": "

Gets a list of the projects in an AWS account.

", - "ListSubscribedWorkteams": "

Gets a list of the work teams that you are subscribed to in the AWS Marketplace. The list may be empty if no work team satisfies the filter specified in the NameContains parameter.

", + "ListProjects": "

Gets a list of the projects in an Amazon Web Services account.

", + "ListSubscribedWorkteams": "

Gets a list of the work teams that you are subscribed to in the Amazon Web Services Marketplace. The list may be empty if no work team satisfies the filter specified in the NameContains parameter.

", "ListTags": "

Returns the tags for the specified Amazon SageMaker resource.

", - "ListTrainingJobs": "

Lists training jobs.

When StatusEquals and MaxResults are set at the same time, the MaxResults number of training jobs are first retrieved ignoring the StatusEquals parameter and then they are filtered by the StatusEquals parameter, which is returned as a response.

For example, if ListTrainingJobs is invoked with the following parameters:

{ ... MaxResults: 100, StatusEquals: InProgress ... }

First, 100 trainings jobs with any status, including those other than InProgress, are selected (sorted according to the creation time, from the most current to the oldest). Next, those with a status of InProgress are returned.

You can quickly test the API using the following AWS CLI code.

aws sagemaker list-training-jobs --max-results 100 --status-equals InProgress

", + "ListTrainingJobs": "

Lists training jobs.

When StatusEquals and MaxResults are set at the same time, the MaxResults number of training jobs are first retrieved ignoring the StatusEquals parameter and then they are filtered by the StatusEquals parameter, which is returned as a response.

For example, if ListTrainingJobs is invoked with the following parameters:

{ ... MaxResults: 100, StatusEquals: InProgress ... }

First, 100 training jobs with any status, including those other than InProgress, are selected (sorted according to the creation time, from the most current to the oldest). Next, those with a status of InProgress are returned.

You can quickly test the API using the following Amazon Web Services CLI code.

aws sagemaker list-training-jobs --max-results 100 --status-equals InProgress

", "ListTrainingJobsForHyperParameterTuningJob": "

Gets a list of TrainingJobSummary objects that describe the training jobs that a hyperparameter tuning job launched.

", "ListTransformJobs": "

Lists transform jobs.

", "ListTrialComponents": "

Lists the trial components in your account. You can sort the list by trial component name or creation time. You can filter the list to show only components that were created in a specific time range. You can also filter on one of the following:

", "ListTrials": "

Lists the trials in your account. Specify an experiment name to limit the list to the trials that are part of that experiment. Specify a trial component name to limit the list to the trials that associated with that trial component. The list can be filtered to show only trials that were created in a specific time range. The list can be sorted by trial name or creation time.

", "ListUserProfiles": "

Lists user profiles.

", - "ListWorkforces": "

Use this operation to list all private and vendor workforces in an AWS Region. Note that you can only have one private workforce per AWS Region.

", + "ListWorkforces": "

Use this operation to list all private and vendor workforces in an Amazon Web Services Region. Note that you can only have one private workforce per Amazon Web Services Region.

", "ListWorkteams": "

Gets a list of private work teams that you have defined in a region. The list may be empty if no work team satisfies the filter specified in the NameContains parameter.

", - "PutModelPackageGroupPolicy": "

Adds a resouce policy to control access to a model group. For information about resoure policies, see Identity-based policies and resource-based policies in the AWS Identity and Access Management User Guide..

", + "PutModelPackageGroupPolicy": "

Adds a resource policy to control access to a model group. For information about resource policies, see Identity-based policies and resource-based policies in the Amazon Web Services Identity and Access Management User Guide.

", "RegisterDevices": "

Register devices.

", "RenderUiTemplate": "

Renders the UI template so that you can preview the worker's experience.

", "Search": "

Finds Amazon SageMaker resources that match a search query. Matching resources are returned as a list of SearchRecord objects in the response. You can sort the search results by any resource property in ascending or descending order.

You can query against the following value types: numeric, text, Boolean, and timestamp.

", @@ -209,7 +209,7 @@ "StopLabelingJob": "

Stops a running labeling job. A job that is stopped cannot be restarted. Any results obtained before the job is stopped are placed in the Amazon S3 output bucket.

", "StopMonitoringSchedule": "

Stops a previously started monitoring schedule.

", "StopNotebookInstance": "

Terminates the ML compute instance. Before terminating the instance, Amazon SageMaker disconnects the ML storage volume from it. Amazon SageMaker preserves the ML storage volume. Amazon SageMaker stops charging you for the ML compute instance when you call StopNotebookInstance.

To access data on the ML storage volume for a notebook instance that has been terminated, call the StartNotebookInstance API. StartNotebookInstance launches another ML compute instance, configures it, and attaches the preserved ML storage volume so you can continue your work.

", - "StopPipelineExecution": "

Stops a pipeline execution.

", + "StopPipelineExecution": "

Stops a pipeline execution.

A pipeline execution won't stop while a callback step is running. When you call StopPipelineExecution on a pipeline execution with a running callback step, SageMaker Pipelines sends an additional Amazon SQS message to the specified SQS queue. The body of the SQS message contains a \"Status\" field which is set to \"Stopping\".

You should add logic to your Amazon SQS message consumer to take any needed action (for example, resource cleanup) upon receipt of the message followed by a call to SendPipelineExecutionStepSuccess or SendPipelineExecutionStepFailure.

Only when SageMaker Pipelines receives one of these calls will it stop the pipeline execution.

", "StopProcessingJob": "

Stops a processing job.

", "StopTrainingJob": "

Stops a training job. To stop a job, Amazon SageMaker sends the algorithm the SIGTERM signal, which delays job termination for 120 seconds. Algorithms might use this 120-second window to save the model artifacts, so the results of the training are not lost.

When it receives a StopTrainingJob request, Amazon SageMaker changes the status of the job to Stopping. After Amazon SageMaker stops the job, it sets the status to Stopped.

", "StopTransformJob": "

Stops a transform job.

When Amazon SageMaker receives a StopTransformJob request, the status of the job changes to Stopping. After Amazon SageMaker stops the job, the status is set to Stopped. When you stop a transform job before it is completed, Amazon SageMaker doesn't store the job's output in Amazon S3.

", @@ -248,7 +248,7 @@ "AccountId": { "base": null, "refs": { - "LabelingJobForWorkteamSummary$WorkRequesterAccountId": "

The AWS account ID of the account used to start the labeling job.

" + "LabelingJobForWorkteamSummary$WorkRequesterAccountId": "

The Amazon Web Services account ID of the account used to start the labeling job.

" } }, "ActionArn": { @@ -313,10 +313,10 @@ "AdditionalCodeRepositoryNamesOrUrls": { "base": null, "refs": { - "CreateNotebookInstanceInput$AdditionalCodeRepositories": "

An array of up to three Git repositories to associate with the notebook instance. These can be either the names of Git repositories stored as resources in your account, or the URL of Git repositories in AWS CodeCommit or in any other Git repository. These repositories are cloned at the same level as the default repository of your notebook instance. For more information, see Associating Git Repositories with Amazon SageMaker Notebook Instances.

", - "DescribeNotebookInstanceOutput$AdditionalCodeRepositories": "

An array of up to three Git repositories associated with the notebook instance. These can be either the names of Git repositories stored as resources in your account, or the URL of Git repositories in AWS CodeCommit or in any other Git repository. These repositories are cloned at the same level as the default repository of your notebook instance. For more information, see Associating Git Repositories with Amazon SageMaker Notebook Instances.

", - "NotebookInstanceSummary$AdditionalCodeRepositories": "

An array of up to three Git repositories associated with the notebook instance. These can be either the names of Git repositories stored as resources in your account, or the URL of Git repositories in AWS CodeCommit or in any other Git repository. These repositories are cloned at the same level as the default repository of your notebook instance. For more information, see Associating Git Repositories with Amazon SageMaker Notebook Instances.

", - "UpdateNotebookInstanceInput$AdditionalCodeRepositories": "

An array of up to three Git repositories to associate with the notebook instance. These can be either the names of Git repositories stored as resources in your account, or the URL of Git repositories in AWS CodeCommit or in any other Git repository. These repositories are cloned at the same level as the default repository of your notebook instance. For more information, see Associating Git Repositories with Amazon SageMaker Notebook Instances.

" + "CreateNotebookInstanceInput$AdditionalCodeRepositories": "

An array of up to three Git repositories to associate with the notebook instance. These can be either the names of Git repositories stored as resources in your account, or the URL of Git repositories in Amazon Web Services CodeCommit or in any other Git repository. These repositories are cloned at the same level as the default repository of your notebook instance. For more information, see Associating Git Repositories with Amazon SageMaker Notebook Instances.

", + "DescribeNotebookInstanceOutput$AdditionalCodeRepositories": "

An array of up to three Git repositories associated with the notebook instance. These can be either the names of Git repositories stored as resources in your account, or the URL of Git repositories in Amazon Web Services CodeCommit or in any other Git repository. These repositories are cloned at the same level as the default repository of your notebook instance. For more information, see Associating Git Repositories with Amazon SageMaker Notebook Instances.

", + "NotebookInstanceSummary$AdditionalCodeRepositories": "

An array of up to three Git repositories associated with the notebook instance. These can be either the names of Git repositories stored as resources in your account, or the URL of Git repositories in Amazon Web Services CodeCommit or in any other Git repository. These repositories are cloned at the same level as the default repository of your notebook instance. For more information, see Associating Git Repositories with Amazon SageMaker Notebook Instances.

", + "UpdateNotebookInstanceInput$AdditionalCodeRepositories": "

An array of up to three Git repositories to associate with the notebook instance. These can be either the names of Git repositories stored as resources in your account, or the URL of Git repositories in Amazon Web Services CodeCommit or in any other Git repository. These repositories are cloned at the same level as the default repository of your notebook instance. For more information, see Associating Git Repositories with Amazon SageMaker Notebook Instances.

" } }, "AgentVersion": { @@ -419,7 +419,7 @@ } }, "AlgorithmValidationProfile": { - "base": "

Defines a training job and a batch transform job that Amazon SageMaker runs to validate your algorithm.

The data provided in the validation profile is made available to your buyers on AWS Marketplace.

", + "base": "

Defines a training job and a batch transform job that Amazon SageMaker runs to validate your algorithm.

The data provided in the validation profile is made available to your buyers on Amazon Web Services Marketplace.

", "refs": { "AlgorithmValidationProfiles$member": null } @@ -574,13 +574,13 @@ "ArnOrName": { "base": null, "refs": { - "AlgorithmSpecification$AlgorithmName": "

The name of the algorithm resource to use for the training job. This must be an algorithm resource that you created or subscribe to on AWS Marketplace. If you specify a value for this parameter, you can't specify a value for TrainingImage.

", + "AlgorithmSpecification$AlgorithmName": "

The name of the algorithm resource to use for the training job. This must be an algorithm resource that you created or subscribe to on Amazon Web Services Marketplace. If you specify a value for this parameter, you can't specify a value for TrainingImage.

", "DeleteModelPackageGroupInput$ModelPackageGroupName": "

The name of the model group to delete.

", "DescribeAlgorithmInput$AlgorithmName": "

The name of the algorithm to describe.

", "DescribeModelPackageGroupInput$ModelPackageGroupName": "

The name of the model group to describe.

", "HyperParameterAlgorithmSpecification$AlgorithmName": "

The name of the resource algorithm to use for the hyperparameter tuning job. If you specify a value for this parameter, do not specify a value for TrainingImage.

", "ListModelPackagesInput$ModelPackageGroupName": "

A filter that returns only model versions that belong to the specified model group.

", - "SourceAlgorithm$AlgorithmName": "

The name of an algorithm that was used to create the model package. The algorithm must be either an algorithm resource in your Amazon SageMaker account or an algorithm in AWS Marketplace that you are subscribed to.

" + "SourceAlgorithm$AlgorithmName": "

The name of an algorithm that was used to create the model package. The algorithm must be either an algorithm resource in your Amazon SageMaker account or an algorithm in Amazon Web Services Marketplace that you are subscribed to.

" } }, "ArtifactArn": { @@ -1046,7 +1046,7 @@ "MonitoringNetworkConfig$EnableNetworkIsolation": "

Whether to allow inbound and outbound network calls to and from the containers used for the monitoring job.

", "NetworkConfig$EnableInterContainerTrafficEncryption": "

Whether to encrypt all communications between distributed processing jobs. Choose True to encrypt communications. Encryption provides greater security for distributed processing jobs, but the processing might take longer.

", "NetworkConfig$EnableNetworkIsolation": "

Whether to allow inbound and outbound network calls to and from the containers used for the processing job.

", - "OfflineStoreConfig$DisableGlueTableCreation": "

Set to True to disable the automatic creation of an AWS Glue table when configuring an OfflineStore.

", + "OfflineStoreConfig$DisableGlueTableCreation": "

Set to True to disable the automatic creation of an Amazon Web Services Glue table when configuring an OfflineStore.

", "OnlineStoreConfig$EnableOnlineStore": "

Turn OnlineStore off by specifying False for the EnableOnlineStore flag. Turn OnlineStore on by specifying True for the EnableOnlineStore flag.

The default value is False.

", "TrainingJob$EnableNetworkIsolation": "

If the TrainingJob was created with network isolation, the value is set to true. If network isolation is enabled, nodes can't communicate beyond the VPC they run in.

", "TrainingJob$EnableInterContainerTrafficEncryption": "

To encrypt all communications between ML compute instances in distributed training, choose True. Encryption provides greater security for distributed training, but training might take longer. How long it takes depends on the amount of communication between compute instances, especially if you use a deep learning algorithm in distributed training.

", @@ -1230,11 +1230,11 @@ "CertifyForMarketplace": { "base": null, "refs": { - "CreateAlgorithmInput$CertifyForMarketplace": "

Whether to certify the algorithm so that it can be listed in AWS Marketplace.

", - "CreateModelPackageInput$CertifyForMarketplace": "

Whether to certify the model package for listing on AWS Marketplace.

This parameter is optional for unversioned models, and does not apply to versioned models.

", - "DescribeAlgorithmOutput$CertifyForMarketplace": "

Whether the algorithm is certified to be listed in AWS Marketplace.

", - "DescribeModelPackageOutput$CertifyForMarketplace": "

Whether the model package is certified for listing on AWS Marketplace.

", - "ModelPackage$CertifyForMarketplace": "

Whether the model package is to be certified to be listed on AWS Marketplace. For information about listing model packages on AWS Marketplace, see List Your Algorithm or Model Package on AWS Marketplace.

" + "CreateAlgorithmInput$CertifyForMarketplace": "

Whether to certify the algorithm so that it can be listed in Amazon Web Services Marketplace.

", + "CreateModelPackageInput$CertifyForMarketplace": "

Whether to certify the model package for listing on Amazon Web Services Marketplace.

This parameter is optional for unversioned models, and does not apply to versioned models.

", + "DescribeAlgorithmOutput$CertifyForMarketplace": "

Whether the algorithm is certified to be listed in Amazon Web Services Marketplace.

", + "DescribeModelPackageOutput$CertifyForMarketplace": "

Whether the model package is certified for listing on Amazon Web Services Marketplace.

", + "ModelPackage$CertifyForMarketplace": "

Whether the model package is to be certified to be listed on Amazon Web Services Marketplace. For information about listing model packages on Amazon Web Services Marketplace, see List Your Algorithm or Model Package on Amazon Web Services Marketplace.

" } }, "Channel": { @@ -1301,7 +1301,7 @@ "ClientToken": { "base": null, "refs": { - "CreateImageVersionRequest$ClientToken": "

A unique ID. If not specified, the AWS CLI and AWS SDKs, such as the SDK for Python (Boto3), add a unique value to the call.

", + "CreateImageVersionRequest$ClientToken": "

A unique ID. If not specified, the Amazon Web Services CLI and Amazon Web Services SDKs, such as the SDK for Python (Boto3), add a unique value to the call.

", "CreateModelPackageInput$ClientToken": "

A unique token that guarantees that the call to this API is idempotent.

" } }, @@ -1330,11 +1330,11 @@ "base": null, "refs": { "AdditionalCodeRepositoryNamesOrUrls$member": null, - "CreateNotebookInstanceInput$DefaultCodeRepository": "

A Git repository to associate with the notebook instance as its default code repository. This can be either the name of a Git repository stored as a resource in your account, or the URL of a Git repository in AWS CodeCommit or in any other Git repository. When you open a notebook instance, it opens in the directory that contains this repository. For more information, see Associating Git Repositories with Amazon SageMaker Notebook Instances.

", - "DescribeNotebookInstanceOutput$DefaultCodeRepository": "

The Git repository associated with the notebook instance as its default code repository. This can be either the name of a Git repository stored as a resource in your account, or the URL of a Git repository in AWS CodeCommit or in any other Git repository. When you open a notebook instance, it opens in the directory that contains this repository. For more information, see Associating Git Repositories with Amazon SageMaker Notebook Instances.

", + "CreateNotebookInstanceInput$DefaultCodeRepository": "

A Git repository to associate with the notebook instance as its default code repository. This can be either the name of a Git repository stored as a resource in your account, or the URL of a Git repository in Amazon Web Services CodeCommit or in any other Git repository. When you open a notebook instance, it opens in the directory that contains this repository. For more information, see Associating Git Repositories with Amazon SageMaker Notebook Instances.

", + "DescribeNotebookInstanceOutput$DefaultCodeRepository": "

The Git repository associated with the notebook instance as its default code repository. This can be either the name of a Git repository stored as a resource in your account, or the URL of a Git repository in Amazon Web Services CodeCommit or in any other Git repository. When you open a notebook instance, it opens in the directory that contains this repository. For more information, see Associating Git Repositories with Amazon SageMaker Notebook Instances.

", "ListNotebookInstancesInput$AdditionalCodeRepositoryEquals": "

A filter that returns only notebook instances associated with the specified git repository.

", - "NotebookInstanceSummary$DefaultCodeRepository": "

The Git repository associated with the notebook instance as its default code repository. This can be either the name of a Git repository stored as a resource in your account, or the URL of a Git repository in AWS CodeCommit or in any other Git repository. When you open a notebook instance, it opens in the directory that contains this repository. For more information, see Associating Git Repositories with Amazon SageMaker Notebook Instances.

", - "UpdateNotebookInstanceInput$DefaultCodeRepository": "

The Git repository to associate with the notebook instance as its default code repository. This can be either the name of a Git repository stored as a resource in your account, or the URL of a Git repository in AWS CodeCommit or in any other Git repository. When you open a notebook instance, it opens in the directory that contains this repository. For more information, see Associating Git Repositories with Amazon SageMaker Notebook Instances.

" + "NotebookInstanceSummary$DefaultCodeRepository": "

The Git repository associated with the notebook instance as its default code repository. This can be either the name of a Git repository stored as a resource in your account, or the URL of a Git repository in Amazon Web Services CodeCommit or in any other Git repository. When you open a notebook instance, it opens in the directory that contains this repository. For more information, see Associating Git Repositories with Amazon SageMaker Notebook Instances.

", + "UpdateNotebookInstanceInput$DefaultCodeRepository": "

The Git repository to associate with the notebook instance as its default code repository. This can be either the name of a Git repository stored as a resource in your account, or the URL of a Git repository in Amazon Web Services CodeCommit or in any other Git repository. When you open a notebook instance, it opens in the directory that contains this repository. For more information, see Associating Git Repositories with Amazon SageMaker Notebook Instances.

" } }, "CodeRepositorySortBy": { @@ -1358,7 +1358,7 @@ "CodeRepositorySummaryList": { "base": null, "refs": { - "ListCodeRepositoriesOutput$CodeRepositorySummaryList": "

Gets a list of summaries of the Git repositories. Each summary specifies the following values for the repository:

" + "ListCodeRepositoriesOutput$CodeRepositorySummaryList": "

Gets a list of summaries of the Git repositories. Each summary specifies the following values for the repository:

" } }, "CognitoConfig": { @@ -3382,8 +3382,8 @@ "EdgePresetDeploymentType": { "base": null, "refs": { - "EdgeOutputConfig$PresetDeploymentType": "

The deployment type SageMaker Edge Manager will create. Currently only supports AWS IoT Greengrass Version 2 components.

", - "EdgePresetDeploymentOutput$Type": "

The deployment type created by SageMaker Edge Manager. Currently only supports AWS IoT Greengrass Version 2 components.

" + "EdgeOutputConfig$PresetDeploymentType": "

The deployment type SageMaker Edge Manager will create. Currently only supports Amazon Web Services IoT Greengrass Version 2 components.

", + "EdgePresetDeploymentOutput$Type": "

The deployment type created by SageMaker Edge Manager. Currently only supports Amazon Web Services IoT Greengrass Version 2 components.

" } }, "EdgeVersion": { @@ -3414,8 +3414,8 @@ "EnableIotRoleAlias": { "base": null, "refs": { - "CreateDeviceFleetRequest$EnableIotRoleAlias": "

Whether to create an AWS IoT Role Alias during device fleet creation. The name of the role alias generated will match this pattern: \"SageMakerEdge-{DeviceFleetName}\".

For example, if your device fleet is called \"demo-fleet\", the name of the role alias will be \"SageMakerEdge-demo-fleet\".

", - "UpdateDeviceFleetRequest$EnableIotRoleAlias": "

Whether to create an AWS IoT Role Alias during device fleet creation. The name of the role alias generated will match this pattern: \"SageMakerEdge-{DeviceFleetName}\".

For example, if your device fleet is called \"demo-fleet\", the name of the role alias will be \"SageMakerEdge-demo-fleet\".

" + "CreateDeviceFleetRequest$EnableIotRoleAlias": "

Whether to create an Amazon Web Services IoT Role Alias during device fleet creation. The name of the role alias generated will match this pattern: \"SageMakerEdge-{DeviceFleetName}\".

For example, if your device fleet is called \"demo-fleet\", the name of the role alias will be \"SageMakerEdge-demo-fleet\".

", + "UpdateDeviceFleetRequest$EnableIotRoleAlias": "

Whether to create an Amazon Web Services IoT Role Alias during device fleet creation. The name of the role alias generated will match this pattern: \"SageMakerEdge-{DeviceFleetName}\".

For example, if your device fleet is called \"demo-fleet\", the name of the role alias will be \"SageMakerEdge-demo-fleet\".

" } }, "EnableSagemakerServicecatalogPortfolioInput": { @@ -3504,7 +3504,7 @@ "EndpointName": { "base": null, "refs": { - "CreateEndpointInput$EndpointName": "

The name of the endpoint.The name must be unique within an AWS Region in your AWS account. The name is case-insensitive in CreateEndpoint, but the case is preserved and must be matched in .

", + "CreateEndpointInput$EndpointName": "

The name of the endpoint. The name must be unique within an Amazon Web Services Region in your Amazon Web Services account. The name is case-insensitive in CreateEndpoint, but the case is preserved and must be matched in .

", "DeleteEndpointInput$EndpointName": "

The name of the endpoint that you want to delete.

", "DescribeEndpointInput$EndpointName": "

The name of the endpoint.

", "DescribeEndpointOutput$EndpointName": "

Name of the endpoint.

", @@ -3592,7 +3592,7 @@ "CompilationJobSummary$CompilationJobName": "

The name of the model compilation job that you want a summary for.

", "CreateAlgorithmInput$AlgorithmName": "

The name of the algorithm.

", "CreateCodeRepositoryInput$CodeRepositoryName": "

The name of the Git repository. The name must have 1 to 63 characters. Valid characters are a-z, A-Z, 0-9, and - (hyphen).

", - "CreateCompilationJobRequest$CompilationJobName": "

A name for the model compilation job. The name must be unique within the AWS Region and within your AWS account.

", + "CreateCompilationJobRequest$CompilationJobName": "

A name for the model compilation job. The name must be unique within the Amazon Web Services Region and within your Amazon Web Services account.

", "CreateDeviceFleetRequest$DeviceFleetName": "

The name of the fleet that the device belongs to.

", "CreateEdgePackagingJobRequest$EdgePackagingJobName": "

The name of the edge packaging job.

", "CreateEdgePackagingJobRequest$CompilationJobName": "

The name of the SageMaker Neo compilation job that will be used to locate model artifacts for packaging.

", @@ -3664,7 +3664,8 @@ "base": null, "refs": { "AutoMLContainerDefinition$Environment": "

The environment variables to set in the container. For more information, see .

", - "ContainerDefinition$Environment": "

The environment variables to set in the Docker container. Each key and value in the Environment string to string map can have length of up to 1024. We support up to 16 entries in the map.

" + "ContainerDefinition$Environment": "

The environment variables to set in the Docker container. Each key and value in the Environment string to string map can have length of up to 1024. We support up to 16 entries in the map.

", + "ModelPackageContainerDefinition$Environment": "

The environment variables to set in the Docker container. Each key and value in the Environment string to string map can have length of up to 1024. We support up to 16 entries in the map.

" } }, "EnvironmentValue": { @@ -3744,14 +3745,14 @@ "AssociationSummary$SourceName": "

The name of the source.

", "AssociationSummary$DestinationName": "

The name of the destination.

", "ContextSummary$ContextName": "

The name of the context.

", - "CreateActionRequest$ActionName": "

The name of the action. Must be unique to your account in an AWS Region.

", - "CreateArtifactRequest$ArtifactName": "

The name of the artifact. Must be unique to your account in an AWS Region.

", - "CreateContextRequest$ContextName": "

The name of the context. Must be unique to your account in an AWS Region.

", - "CreateExperimentRequest$ExperimentName": "

The name of the experiment. The name must be unique in your AWS account and is not case-sensitive.

", + "CreateActionRequest$ActionName": "

The name of the action. Must be unique to your account in an Amazon Web Services Region.

", + "CreateArtifactRequest$ArtifactName": "

The name of the artifact. Must be unique to your account in an Amazon Web Services Region.

", + "CreateContextRequest$ContextName": "

The name of the context. Must be unique to your account in an Amazon Web Services Region.

", + "CreateExperimentRequest$ExperimentName": "

The name of the experiment. The name must be unique in your Amazon Web Services account and is not case-sensitive.

", "CreateExperimentRequest$DisplayName": "

The name of the experiment as displayed. The name doesn't need to be unique. If you don't specify DisplayName, the value in ExperimentName is displayed.

", - "CreateTrialComponentRequest$TrialComponentName": "

The name of the component. The name must be unique in your AWS account and is not case-sensitive.

", + "CreateTrialComponentRequest$TrialComponentName": "

The name of the component. The name must be unique in your Amazon Web Services account and is not case-sensitive.

", "CreateTrialComponentRequest$DisplayName": "

The name of the component as displayed. The name doesn't need to be unique. If DisplayName isn't specified, TrialComponentName is displayed.

", - "CreateTrialRequest$TrialName": "

The name of the trial. The name must be unique in your AWS account and is not case-sensitive.

", + "CreateTrialRequest$TrialName": "

The name of the trial. The name must be unique in your Amazon Web Services account and is not case-sensitive.

", "CreateTrialRequest$DisplayName": "

The name of the trial as displayed. The name doesn't need to be unique. If DisplayName isn't specified, TrialName is displayed.

", "CreateTrialRequest$ExperimentName": "

The name of the experiment to associate the trial with.

", "DeleteActionRequest$ActionName": "

The name of the action to delete.

", @@ -3934,8 +3935,8 @@ "FeatureGroupName": { "base": null, "refs": { - "CreateFeatureGroupRequest$FeatureGroupName": "

The name of the FeatureGroup. The name must be unique within an AWS Region in an AWS account. The name:

", - "DeleteFeatureGroupRequest$FeatureGroupName": "

The name of the FeatureGroup you want to delete. The name must be unique within an AWS Region in an AWS account.

", + "CreateFeatureGroupRequest$FeatureGroupName": "

The name of the FeatureGroup. The name must be unique within an Amazon Web Services Region in an Amazon Web Services account. The name:

", + "DeleteFeatureGroupRequest$FeatureGroupName": "

The name of the FeatureGroup you want to delete. The name must be unique within an Amazon Web Services Region in an Amazon Web Services account.

", "DescribeFeatureGroupRequest$FeatureGroupName": "

The name of the FeatureGroup you want described.

", "DescribeFeatureGroupResponse$FeatureGroupName": "

he name of the FeatureGroup.

", "FeatureGroup$FeatureGroupName": "

The name of the FeatureGroup.

", @@ -4220,17 +4221,17 @@ } }, "GitConfig": { - "base": "

Specifies configuration details for a Git repository in your AWS account.

", + "base": "

Specifies configuration details for a Git repository in your Amazon Web Services account.

", "refs": { - "CodeRepositorySummary$GitConfig": "

Configuration details for the Git repository, including the URL where it is located and the ARN of the AWS Secrets Manager secret that contains the credentials used to access the repository.

", + "CodeRepositorySummary$GitConfig": "

Configuration details for the Git repository, including the URL where it is located and the ARN of the Amazon Web Services Secrets Manager secret that contains the credentials used to access the repository.

", "CreateCodeRepositoryInput$GitConfig": "

Specifies details about the repository, including the URL where the repository is located, the default branch, and credentials to use to access the repository.

", - "DescribeCodeRepositoryOutput$GitConfig": "

Configuration details about the repository, including the URL where the repository is located, the default branch, and the Amazon Resource Name (ARN) of the AWS Secrets Manager secret that contains the credentials used to access the repository.

" + "DescribeCodeRepositoryOutput$GitConfig": "

Configuration details about the repository, including the URL where the repository is located, the default branch, and the Amazon Resource Name (ARN) of the Amazon Web Services Secrets Manager secret that contains the credentials used to access the repository.

" } }, "GitConfigForUpdate": { "base": "

Specifies configuration details for a Git repository when the repository is updated.

", "refs": { - "UpdateCodeRepositoryInput$GitConfig": "

The configuration of the git repository, including the URL and the Amazon Resource Name (ARN) of the AWS Secrets Manager secret that contains the credentials used to access the repository. The secret must have a staging label of AWSCURRENT and must be in the following format:

{\"username\": UserName, \"password\": Password}

" + "UpdateCodeRepositoryInput$GitConfig": "

The configuration of the git repository, including the URL and the Amazon Resource Name (ARN) of the Amazon Web Services Secrets Manager secret that contains the credentials used to access the repository. The secret must have a staging label of AWSCURRENT and must be in the following format:

{\"username\": UserName, \"password\": Password}

" } }, "GitConfigUrl": { @@ -4305,7 +4306,7 @@ "HumanLoopConfig$HumanTaskUiArn": "

The Amazon Resource Name (ARN) of the human task user interface.

You can use standard HTML and Crowd HTML Elements to create a custom worker task template. You use this template to create a human task UI.

To learn how to create a custom HTML template, see Create Custom Worker Task Template.

To learn how to create a human task UI, which is a worker task template that can be used in a flow definition, see Create and Delete a Worker Task Templates.

", "HumanTaskUiSummary$HumanTaskUiArn": "

The Amazon Resource Name (ARN) of the human task user interface.

", "RenderUiTemplateRequest$HumanTaskUiArn": "

The HumanTaskUiArn of the worker UI that you want to render. Do not provide a HumanTaskUiArn if you use the UiTemplate parameter.

See a list of available Human Ui Amazon Resource Names (ARNs) in UiConfig.

", - "UiConfig$HumanTaskUiArn": "

The ARN of the worker task template used to render the worker UI and tools for labeling job tasks.

Use this parameter when you are creating a labeling job for 3D point cloud and video fram labeling jobs. Use your labeling job task type to select one of the following ARNs and use it with this parameter when you create a labeling job. Replace aws-region with the AWS region you are creating your labeling job in.

3D Point Cloud HumanTaskUiArns

Use this HumanTaskUiArn for 3D point cloud object detection and 3D point cloud object detection adjustment labeling jobs.

Use this HumanTaskUiArn for 3D point cloud object tracking and 3D point cloud object tracking adjustment labeling jobs.

Use this HumanTaskUiArn for 3D point cloud semantic segmentation and 3D point cloud semantic segmentation adjustment labeling jobs.

Video Frame HumanTaskUiArns

Use this HumanTaskUiArn for video frame object detection and video frame object detection adjustment labeling jobs.

Use this HumanTaskUiArn for video frame object tracking and video frame object tracking adjustment labeling jobs.

" + "UiConfig$HumanTaskUiArn": "

The ARN of the worker task template used to render the worker UI and tools for labeling job tasks.

Use this parameter when you are creating a labeling job for 3D point cloud and video frame labeling jobs. Use your labeling job task type to select one of the following ARNs and use it with this parameter when you create a labeling job. Replace aws-region with the Amazon Web Services region you are creating your labeling job in.

3D Point Cloud HumanTaskUiArns

Use this HumanTaskUiArn for 3D point cloud object detection and 3D point cloud object detection adjustment labeling jobs.

Use this HumanTaskUiArn for 3D point cloud object tracking and 3D point cloud object tracking adjustment labeling jobs.

Use this HumanTaskUiArn for 3D point cloud semantic segmentation and 3D point cloud semantic segmentation adjustment labeling jobs.

Video Frame HumanTaskUiArns

Use this HumanTaskUiArn for video frame object detection and video frame object detection adjustment labeling jobs.

Use this HumanTaskUiArn for video frame object tracking and video frame object tracking adjustment labeling jobs.

" } }, "HumanTaskUiName": { @@ -4423,7 +4424,7 @@ "HyperParameterTuningJobName": { "base": null, "refs": { - "CreateHyperParameterTuningJobRequest$HyperParameterTuningJobName": "

The name of the tuning job. This name is the prefix for the names of all training jobs that this tuning job launches. The name must be unique within the same AWS account and AWS Region. The name must have 1 to 32 characters. Valid characters are a-z, A-Z, 0-9, and : + = @ _ % - (hyphen). The name is not case sensitive.

", + "CreateHyperParameterTuningJobRequest$HyperParameterTuningJobName": "

The name of the tuning job. This name is the prefix for the names of all training jobs that this tuning job launches. The name must be unique within the same Amazon Web Services account and Amazon Web Services Region. The name must have 1 to 32 characters. Valid characters are a-z, A-Z, 0-9, and : + = @ _ % - (hyphen). The name is not case sensitive.

", "DescribeHyperParameterTuningJobRequest$HyperParameterTuningJobName": "

The name of the tuning job.

", "DescribeHyperParameterTuningJobResponse$HyperParameterTuningJobName": "

The name of the tuning job.

", "HyperParameterTrainingJobSummary$TuningJobName": "

The HyperParameter tuning job that launched the training job.

", @@ -4727,7 +4728,7 @@ "InferenceImage": { "base": null, "refs": { - "DescribeCompilationJobResponse$InferenceImage": null + "DescribeCompilationJobResponse$InferenceImage": "

The inference image to use when compiling a model. Specify an image only if the target device is a cloud instance.

" } }, "InferenceSpecification": { @@ -4817,7 +4818,7 @@ "IotRoleAlias": { "base": null, "refs": { - "DescribeDeviceFleetResponse$IotRoleAlias": "

The Amazon Resource Name (ARN) alias created in AWS Internet of Things (IoT).

" + "DescribeDeviceFleetResponse$IotRoleAlias": "

The Amazon Resource Name (ARN) alias created in Amazon Web Services Internet of Things (IoT).

" } }, "JobReferenceCode": { @@ -4906,39 +4907,39 @@ "KmsKeyId": { "base": null, "refs": { - "AthenaDatasetDefinition$KmsKeyId": "

The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to encrypt data generated from an Athena query execution.

", - "AutoMLOutputDataConfig$KmsKeyId": "

The AWS KMS encryption key ID.

", + "AthenaDatasetDefinition$KmsKeyId": "

The Amazon Web Services Key Management Service (Amazon Web Services KMS) key that Amazon SageMaker uses to encrypt data generated from an Athena query execution.

", + "AutoMLOutputDataConfig$KmsKeyId": "

The Amazon Web Services KMS encryption key ID.

", "AutoMLSecurityConfig$VolumeKmsKeyId": "

The key used to encrypt stored data.

", "CreateDomainRequest$HomeEfsFileSystemKmsKeyId": "

This member is deprecated and replaced with KmsKeyId.

", - "CreateDomainRequest$KmsKeyId": "

SageMaker uses AWS KMS to encrypt the EFS volume attached to the domain with an AWS managed customer master key (CMK) by default. For more control, specify a customer managed CMK.

", + "CreateDomainRequest$KmsKeyId": "

SageMaker uses Amazon Web Services KMS to encrypt the EFS volume attached to the domain with an Amazon Web Services managed customer master key (CMK) by default. For more control, specify a customer managed CMK.

", "CreateEdgePackagingJobRequest$ResourceKey": "

The CMK to use when encrypting the EBS volume the edge packaging job runs on.

", - "CreateEndpointConfigInput$KmsKeyId": "

The Amazon Resource Name (ARN) of a AWS Key Management Service key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance that hosts the endpoint.

The KmsKeyId can be any of the following formats:

The KMS key policy must grant permission to the IAM role that you specify in your CreateEndpoint, UpdateEndpoint requests. For more information, refer to the AWS Key Management Service section Using Key Policies in AWS KMS

Certain Nitro-based instances include local storage, dependent on the instance type. Local storage volumes are encrypted using a hardware module on the instance. You can't request a KmsKeyId when using an instance type with local storage. If any of the models that you specify in the ProductionVariants parameter use nitro-based instances with local storage, do not specify a value for the KmsKeyId parameter. If you specify a value for KmsKeyId when using any nitro-based instances with local storage, the call to CreateEndpointConfig fails.

For a list of instance types that support local instance storage, see Instance Store Volumes.

For more information about local instance storage encryption, see SSD Instance Store Volumes.

", - "CreateNotebookInstanceInput$KmsKeyId": "

The Amazon Resource Name (ARN) of a AWS Key Management Service key that Amazon SageMaker uses to encrypt data on the storage volume attached to your notebook instance. The KMS key you provide must be enabled. For information, see Enabling and Disabling Keys in the AWS Key Management Service Developer Guide.

", + "CreateEndpointConfigInput$KmsKeyId": "

The Amazon Resource Name (ARN) of an Amazon Web Services Key Management Service key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance that hosts the endpoint.

The KmsKeyId can be any of the following formats:

The KMS key policy must grant permission to the IAM role that you specify in your CreateEndpoint, UpdateEndpoint requests. For more information, refer to the Amazon Web Services Key Management Service section Using Key Policies in Amazon Web Services KMS

Certain Nitro-based instances include local storage, dependent on the instance type. Local storage volumes are encrypted using a hardware module on the instance. You can't request a KmsKeyId when using an instance type with local storage. If any of the models that you specify in the ProductionVariants parameter use nitro-based instances with local storage, do not specify a value for the KmsKeyId parameter. If you specify a value for KmsKeyId when using any nitro-based instances with local storage, the call to CreateEndpointConfig fails.

For a list of instance types that support local instance storage, see Instance Store Volumes.

For more information about local instance storage encryption, see SSD Instance Store Volumes.

", + "CreateNotebookInstanceInput$KmsKeyId": "

The Amazon Resource Name (ARN) of an Amazon Web Services Key Management Service key that Amazon SageMaker uses to encrypt data on the storage volume attached to your notebook instance. The KMS key you provide must be enabled. For information, see Enabling and Disabling Keys in the Amazon Web Services Key Management Service Developer Guide.

", "DataCaptureConfig$KmsKeyId": "

", "DataCaptureConfigSummary$KmsKeyId": "

", "DescribeDomainResponse$HomeEfsFileSystemKmsKeyId": "

This member is deprecated and replaced with KmsKeyId.

", - "DescribeDomainResponse$KmsKeyId": "

The AWS KMS customer managed CMK used to encrypt the EFS volume attached to the domain.

", + "DescribeDomainResponse$KmsKeyId": "

The Amazon Web Services KMS customer managed CMK used to encrypt the EFS volume attached to the domain.

", "DescribeEdgePackagingJobResponse$ResourceKey": "

The CMK to use when encrypting the EBS volume the job run on.

", - "DescribeEndpointConfigOutput$KmsKeyId": "

AWS KMS key ID Amazon SageMaker uses to encrypt data when storing it on the ML storage volume attached to the instance.

", - "DescribeNotebookInstanceOutput$KmsKeyId": "

The AWS KMS key ID Amazon SageMaker uses to encrypt data when storing it on the ML storage volume attached to the instance.

", - "EdgeOutputConfig$KmsKeyId": "

The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to encrypt data on the storage volume after compilation job. If you don't provide a KMS key ID, Amazon SageMaker uses the default KMS key for Amazon S3 for your role's account.

", + "DescribeEndpointConfigOutput$KmsKeyId": "

Amazon Web Services KMS key ID Amazon SageMaker uses to encrypt data when storing it on the ML storage volume attached to the instance.

", + "DescribeNotebookInstanceOutput$KmsKeyId": "

The Amazon Web Services KMS key ID Amazon SageMaker uses to encrypt data when storing it on the ML storage volume attached to the instance.

", + "EdgeOutputConfig$KmsKeyId": "

The Amazon Web Services Key Management Service (Amazon Web Services KMS) key that Amazon SageMaker uses to encrypt data on the storage volume after compilation job. If you don't provide a KMS key ID, Amazon SageMaker uses the default KMS key for Amazon S3 for your role's account.

", "FlowDefinitionOutputConfig$KmsKeyId": "

The Amazon Key Management Service (KMS) key ID for server-side encryption.

", - "LabelingJobOutputConfig$KmsKeyId": "

The AWS Key Management Service ID of the key used to encrypt the output data, if any.

If you provide your own KMS key ID, you must add the required permissions to your KMS key described in Encrypt Output Data and Storage Volume with AWS KMS.

If you don't provide a KMS key ID, Amazon SageMaker uses the default AWS KMS key for Amazon S3 for your role's account to encrypt your output data.

If you use a bucket policy with an s3:PutObject permission that only allows objects with server-side encryption, set the condition key of s3:x-amz-server-side-encryption to \"aws:kms\". For more information, see KMS-Managed Encryption Keys in the Amazon Simple Storage Service Developer Guide.

", - "LabelingJobResourceConfig$VolumeKmsKeyId": "

The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance(s) that run the training and inference jobs used for automated data labeling.

You can only specify a VolumeKmsKeyId when you create a labeling job with automated data labeling enabled using the API operation CreateLabelingJob. You cannot specify an AWS KMS customer managed CMK to encrypt the storage volume used for automated data labeling model training and inference when you create a labeling job using the console. To learn more, see Output Data and Storage Volume Encryption.

The VolumeKmsKeyId can be any of the following formats:

", - "MonitoringClusterConfig$VolumeKmsKeyId": "

The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance(s) that run the model monitoring job.

", - "MonitoringOutputConfig$KmsKeyId": "

The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption.

", - "OnlineStoreSecurityConfig$KmsKeyId": "

The ID of the AWS Key Management Service (AWS KMS) key that SageMaker Feature Store uses to encrypt the Amazon S3 objects at rest using Amazon S3 server-side encryption.

The caller (either IAM user or IAM role) of CreateFeatureGroup must have below permissions to the OnlineStore KmsKeyId:

The caller (either IAM user or IAM role) to all DataPlane operations (PutRecord, GetRecord, DeleteRecord) must have the following permissions to the KmsKeyId:

", + "LabelingJobOutputConfig$KmsKeyId": "

The Amazon Web Services Key Management Service ID of the key used to encrypt the output data, if any.

If you provide your own KMS key ID, you must add the required permissions to your KMS key described in Encrypt Output Data and Storage Volume with Amazon Web Services KMS.

If you don't provide a KMS key ID, Amazon SageMaker uses the default Amazon Web Services KMS key for Amazon S3 for your role's account to encrypt your output data.

If you use a bucket policy with an s3:PutObject permission that only allows objects with server-side encryption, set the condition key of s3:x-amz-server-side-encryption to \"aws:kms\". For more information, see KMS-Managed Encryption Keys in the Amazon Simple Storage Service Developer Guide.

", + "LabelingJobResourceConfig$VolumeKmsKeyId": "

The Amazon Web Services Key Management Service (Amazon Web Services KMS) key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance(s) that run the training and inference jobs used for automated data labeling.

You can only specify a VolumeKmsKeyId when you create a labeling job with automated data labeling enabled using the API operation CreateLabelingJob. You cannot specify an Amazon Web Services KMS customer managed CMK to encrypt the storage volume used for automated data labeling model training and inference when you create a labeling job using the console. To learn more, see Output Data and Storage Volume Encryption.

The VolumeKmsKeyId can be any of the following formats:

", + "MonitoringClusterConfig$VolumeKmsKeyId": "

The Amazon Web Services Key Management Service (Amazon Web Services KMS) key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance(s) that run the model monitoring job.

", + "MonitoringOutputConfig$KmsKeyId": "

The Amazon Web Services Key Management Service (Amazon Web Services KMS) key that Amazon SageMaker uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption.

", + "OnlineStoreSecurityConfig$KmsKeyId": "

The ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) key that SageMaker Feature Store uses to encrypt the Amazon S3 objects at rest using Amazon S3 server-side encryption.

The caller (either IAM user or IAM role) of CreateFeatureGroup must have below permissions to the OnlineStore KmsKeyId:

The caller (either IAM user or IAM role) to all DataPlane operations (PutRecord, GetRecord, DeleteRecord) must have the following permissions to the KmsKeyId:

", "OutputConfig$KmsKeyId": "

The Amazon Web Services Key Management Service key (Amazon Web Services KMS) that Amazon SageMaker uses to encrypt your output models with Amazon S3 server-side encryption after compilation job. If you don't provide a KMS key ID, Amazon SageMaker uses the default KMS key for Amazon S3 for your role's account. For more information, see KMS-Managed Encryption Keys in the Amazon Simple Storage Service Developer Guide.

The KmsKeyId can be any of the following formats:

", - "OutputDataConfig$KmsKeyId": "

The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption. The KmsKeyId can be any of the following formats:

If you use a KMS key ID or an alias of your master key, the Amazon SageMaker execution role must include permissions to call kms:Encrypt. If you don't provide a KMS key ID, Amazon SageMaker uses the default KMS key for Amazon S3 for your role's account. Amazon SageMaker uses server-side encryption with KMS-managed keys for OutputDataConfig. If you use a bucket policy with an s3:PutObject permission that only allows objects with server-side encryption, set the condition key of s3:x-amz-server-side-encryption to \"aws:kms\". For more information, see KMS-Managed Encryption Keys in the Amazon Simple Storage Service Developer Guide.

The KMS key policy must grant permission to the IAM role that you specify in your CreateTrainingJob, CreateTransformJob, or CreateHyperParameterTuningJob requests. For more information, see Using Key Policies in AWS KMS in the AWS Key Management Service Developer Guide.

", - "ProcessingClusterConfig$VolumeKmsKeyId": "

The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance(s) that run the processing job.

Certain Nitro-based instances include local storage, dependent on the instance type. Local storage volumes are encrypted using a hardware module on the instance. You can't request a VolumeKmsKeyId when using an instance type with local storage.

For a list of instance types that support local instance storage, see Instance Store Volumes.

For more information about local instance storage encryption, see SSD Instance Store Volumes.

", - "ProcessingOutputConfig$KmsKeyId": "

The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to encrypt the processing job output. KmsKeyId can be an ID of a KMS key, ARN of a KMS key, alias of a KMS key, or alias of a KMS key. The KmsKeyId is applied to all outputs.

", - "ProductionVariantCoreDumpConfig$KmsKeyId": "

The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to encrypt the core dump data at rest using Amazon S3 server-side encryption. The KmsKeyId can be any of the following formats:

If you use a KMS key ID or an alias of your master key, the Amazon SageMaker execution role must include permissions to call kms:Encrypt. If you don't provide a KMS key ID, Amazon SageMaker uses the default KMS key for Amazon S3 for your role's account. Amazon SageMaker uses server-side encryption with KMS-managed keys for OutputDataConfig. If you use a bucket policy with an s3:PutObject permission that only allows objects with server-side encryption, set the condition key of s3:x-amz-server-side-encryption to \"aws:kms\". For more information, see KMS-Managed Encryption Keys in the Amazon Simple Storage Service Developer Guide.

The KMS key policy must grant permission to the IAM role that you specify in your CreateEndpoint and UpdateEndpoint requests. For more information, see Using Key Policies in AWS KMS in the AWS Key Management Service Developer Guide.

", - "RedshiftDatasetDefinition$KmsKeyId": "

The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to encrypt data from a Redshift execution.

", - "ResourceConfig$VolumeKmsKeyId": "

The AWS KMS key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance(s) that run the training job.

Certain Nitro-based instances include local storage, dependent on the instance type. Local storage volumes are encrypted using a hardware module on the instance. You can't request a VolumeKmsKeyId when using an instance type with local storage.

For a list of instance types that support local instance storage, see Instance Store Volumes.

For more information about local instance storage encryption, see SSD Instance Store Volumes.

The VolumeKmsKeyId can be in any of the following formats:

", - "S3StorageConfig$KmsKeyId": "

The AWS Key Management Service (KMS) key ID of the key used to encrypt any objects written into the OfflineStore S3 location.

The IAM roleARN that is passed as a parameter to CreateFeatureGroup must have below permissions to the KmsKeyId:

", - "SharingSettings$S3KmsKeyId": "

When NotebookOutputOption is Allowed, the AWS Key Management Service (KMS) encryption key ID used to encrypt the notebook cell output in the Amazon S3 bucket.

", - "TransformOutput$KmsKeyId": "

The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption. The KmsKeyId can be any of the following formats:

If you don't provide a KMS key ID, Amazon SageMaker uses the default KMS key for Amazon S3 for your role's account. For more information, see KMS-Managed Encryption Keys in the Amazon Simple Storage Service Developer Guide.

The KMS key policy must grant permission to the IAM role that you specify in your CreateModel request. For more information, see Using Key Policies in AWS KMS in the AWS Key Management Service Developer Guide.

", - "TransformResources$VolumeKmsKeyId": "

The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to encrypt model data on the storage volume attached to the ML compute instance(s) that run the batch transform job.

Certain Nitro-based instances include local storage, dependent on the instance type. Local storage volumes are encrypted using a hardware module on the instance. You can't request a VolumeKmsKeyId when using an instance type with local storage.

For a list of instance types that support local instance storage, see Instance Store Volumes.

For more information about local instance storage encryption, see SSD Instance Store Volumes.

The VolumeKmsKeyId can be any of the following formats:

" + "OutputDataConfig$KmsKeyId": "

The Amazon Web Services Key Management Service (Amazon Web Services KMS) key that Amazon SageMaker uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption. The KmsKeyId can be any of the following formats:

If you use a KMS key ID or an alias of your master key, the Amazon SageMaker execution role must include permissions to call kms:Encrypt. If you don't provide a KMS key ID, Amazon SageMaker uses the default KMS key for Amazon S3 for your role's account. Amazon SageMaker uses server-side encryption with KMS-managed keys for OutputDataConfig. If you use a bucket policy with an s3:PutObject permission that only allows objects with server-side encryption, set the condition key of s3:x-amz-server-side-encryption to \"aws:kms\". For more information, see KMS-Managed Encryption Keys in the Amazon Simple Storage Service Developer Guide.

The KMS key policy must grant permission to the IAM role that you specify in your CreateTrainingJob, CreateTransformJob, or CreateHyperParameterTuningJob requests. For more information, see Using Key Policies in Amazon Web Services KMS in the Amazon Web Services Key Management Service Developer Guide.

", + "ProcessingClusterConfig$VolumeKmsKeyId": "

The Amazon Web Services Key Management Service (Amazon Web Services KMS) key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance(s) that run the processing job.

Certain Nitro-based instances include local storage, dependent on the instance type. Local storage volumes are encrypted using a hardware module on the instance. You can't request a VolumeKmsKeyId when using an instance type with local storage.

For a list of instance types that support local instance storage, see Instance Store Volumes.

For more information about local instance storage encryption, see SSD Instance Store Volumes.

", + "ProcessingOutputConfig$KmsKeyId": "

The Amazon Web Services Key Management Service (Amazon Web Services KMS) key that Amazon SageMaker uses to encrypt the processing job output. KmsKeyId can be an ID of a KMS key, ARN of a KMS key, alias of a KMS key, or alias ARN of a KMS key. The KmsKeyId is applied to all outputs.

", + "ProductionVariantCoreDumpConfig$KmsKeyId": "

The Amazon Web Services Key Management Service (Amazon Web Services KMS) key that Amazon SageMaker uses to encrypt the core dump data at rest using Amazon S3 server-side encryption. The KmsKeyId can be any of the following formats:

If you use a KMS key ID or an alias of your master key, the Amazon SageMaker execution role must include permissions to call kms:Encrypt. If you don't provide a KMS key ID, Amazon SageMaker uses the default KMS key for Amazon S3 for your role's account. Amazon SageMaker uses server-side encryption with KMS-managed keys for OutputDataConfig. If you use a bucket policy with an s3:PutObject permission that only allows objects with server-side encryption, set the condition key of s3:x-amz-server-side-encryption to \"aws:kms\". For more information, see KMS-Managed Encryption Keys in the Amazon Simple Storage Service Developer Guide.

The KMS key policy must grant permission to the IAM role that you specify in your CreateEndpoint and UpdateEndpoint requests. For more information, see Using Key Policies in Amazon Web Services KMS in the Amazon Web Services Key Management Service Developer Guide.

", + "RedshiftDatasetDefinition$KmsKeyId": "

The Amazon Web Services Key Management Service (Amazon Web Services KMS) key that Amazon SageMaker uses to encrypt data from a Redshift execution.

", + "ResourceConfig$VolumeKmsKeyId": "

The Amazon Web Services KMS key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance(s) that run the training job.

Certain Nitro-based instances include local storage, dependent on the instance type. Local storage volumes are encrypted using a hardware module on the instance. You can't request a VolumeKmsKeyId when using an instance type with local storage.

For a list of instance types that support local instance storage, see Instance Store Volumes.

For more information about local instance storage encryption, see SSD Instance Store Volumes.

The VolumeKmsKeyId can be in any of the following formats:

", + "S3StorageConfig$KmsKeyId": "

The Amazon Web Services Key Management Service (KMS) key ID of the key used to encrypt any objects written into the OfflineStore S3 location.

The IAM role ARN that is passed as a parameter to CreateFeatureGroup must have the following permissions to the KmsKeyId:

", + "SharingSettings$S3KmsKeyId": "

When NotebookOutputOption is Allowed, the Amazon Web Services Key Management Service (KMS) encryption key ID used to encrypt the notebook cell output in the Amazon S3 bucket.

", + "TransformOutput$KmsKeyId": "

The Amazon Web Services Key Management Service (Amazon Web Services KMS) key that Amazon SageMaker uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption. The KmsKeyId can be any of the following formats:

If you don't provide a KMS key ID, Amazon SageMaker uses the default KMS key for Amazon S3 for your role's account. For more information, see KMS-Managed Encryption Keys in the Amazon Simple Storage Service Developer Guide.

The KMS key policy must grant permission to the IAM role that you specify in your CreateModel request. For more information, see Using Key Policies in Amazon Web Services KMS in the Amazon Web Services Key Management Service Developer Guide.

", + "TransformResources$VolumeKmsKeyId": "

The Amazon Web Services Key Management Service (Amazon Web Services KMS) key that Amazon SageMaker uses to encrypt model data on the storage volume attached to the ML compute instance(s) that run the batch transform job.

Certain Nitro-based instances include local storage, dependent on the instance type. Local storage volumes are encrypted using a hardware module on the instance. You can't request a VolumeKmsKeyId when using an instance type with local storage.

For a list of instance types that support local instance storage, see Instance Store Volumes.

For more information about local instance storage encryption, see SSD Instance Store Volumes.

The VolumeKmsKeyId can be any of the following formats:

" } }, "LabelAttributeName": { @@ -5034,7 +5035,7 @@ "LabelingJobName": { "base": null, "refs": { - "CreateLabelingJobRequest$LabelingJobName": "

The name of the labeling job. This name is used to identify the job in a list of labeling jobs. Labeling job names must be unique within an AWS account and region. LabelingJobName is not case sensitive. For example, Example-job and example-job are considered the same labeling job name by Ground Truth.

", + "CreateLabelingJobRequest$LabelingJobName": "

The name of the labeling job. This name is used to identify the job in a list of labeling jobs. Labeling job names must be unique within an Amazon Web Services account and region. LabelingJobName is not case sensitive. For example, Example-job and example-job are considered the same labeling job name by Ground Truth.

", "DescribeLabelingJobRequest$LabelingJobName": "

The name of the labeling job to return information for.

", "DescribeLabelingJobResponse$LabelingJobName": "

The name assigned to the labeling job when it was created.

", "LabelingJobForWorkteamSummary$LabelingJobName": "

The name of the labeling job that the work team is assigned to.

", @@ -5052,8 +5053,8 @@ "LabelingJobOutputConfig": { "base": "

Output configuration information for a labeling job.

", "refs": { - "CreateLabelingJobRequest$OutputConfig": "

The location of the output data and the AWS Key Management Service key ID for the key used to encrypt the output data, if any.

", - "DescribeLabelingJobResponse$OutputConfig": "

The location of the job's output data and the AWS Key Management Service key ID for the key used to encrypt the output data, if any.

" + "CreateLabelingJobRequest$OutputConfig": "

The location of the output data and the Amazon Web Services Key Management Service key ID for the key used to encrypt the output data, if any.

", + "DescribeLabelingJobResponse$OutputConfig": "

The location of the job's output data and the Amazon Web Services Key Management Service key ID for the key used to encrypt the output data, if any.

" } }, "LabelingJobResourceConfig": { @@ -6120,7 +6121,7 @@ "base": null, "refs": { "CreateModelInput$ModelName": "

The name of the new model.

", - "CreateTransformJobRequest$ModelName": "

The name of the model that you want to use for the transform job. ModelName must be the name of an existing Amazon SageMaker model within an AWS Region in an AWS account.

", + "CreateTransformJobRequest$ModelName": "

The name of the model that you want to use for the transform job. ModelName must be the name of an existing Amazon SageMaker model within an Amazon Web Services Region in an Amazon Web Services account.

", "DeleteModelInput$ModelName": "

The name of the model to delete.

", "DescribeModelInput$ModelName": "

The name of the model.

", "DescribeModelOutput$ModelName": "

Name of the Amazon SageMaker model.

", @@ -6204,7 +6205,7 @@ "ModelPackageGroupSummaryList": { "base": null, "refs": { - "ListModelPackageGroupsOutput$ModelPackageGroupSummaryList": "

A list of summaries of the model groups in your AWS account.

" + "ListModelPackageGroupsOutput$ModelPackageGroupSummaryList": "

A list of summaries of the model groups in your Amazon Web Services account.

" } }, "ModelPackageSortBy": { @@ -6260,7 +6261,7 @@ } }, "ModelPackageValidationProfile": { - "base": "

Contains data, such as the inputs and targeted instance types that are used in the process of validating the model package.

The data provided in the validation profile is made available to your buyers on AWS Marketplace.

", + "base": "

Contains data, such as the inputs and targeted instance types that are used in the process of validating the model package.

The data provided in the validation profile is made available to your buyers on Amazon Web Services Marketplace.

", "refs": { "ModelPackageValidationProfiles$member": null } @@ -6447,8 +6448,8 @@ "base": null, "refs": { "CreateDataQualityJobDefinitionRequest$JobDefinitionName": "

The name for the monitoring job definition.

", - "CreateModelBiasJobDefinitionRequest$JobDefinitionName": "

The name of the bias job definition. The name must be unique within an AWS Region in the AWS account.

", - "CreateModelExplainabilityJobDefinitionRequest$JobDefinitionName": "

The name of the model explainability job definition. The name must be unique within an AWS Region in the AWS account.

", + "CreateModelBiasJobDefinitionRequest$JobDefinitionName": "

The name of the bias job definition. The name must be unique within an Amazon Web Services Region in the Amazon Web Services account.

", + "CreateModelExplainabilityJobDefinitionRequest$JobDefinitionName": "

The name of the model explainability job definition. The name must be unique within an Amazon Web Services Region in the Amazon Web Services account.

", "CreateModelQualityJobDefinitionRequest$JobDefinitionName": "

The name of the monitoring job definition.

", "DeleteDataQualityJobDefinitionRequest$JobDefinitionName": "

The name of the data quality monitoring job definition to delete.

", "DeleteModelBiasJobDefinitionRequest$JobDefinitionName": "

The name of the model bias job definition to delete.

", @@ -6456,12 +6457,12 @@ "DeleteModelQualityJobDefinitionRequest$JobDefinitionName": "

The name of the model quality monitoring job definition to delete.

", "DescribeDataQualityJobDefinitionRequest$JobDefinitionName": "

The name of the data quality monitoring job definition to describe.

", "DescribeDataQualityJobDefinitionResponse$JobDefinitionName": "

The name of the data quality monitoring job definition.

", - "DescribeModelBiasJobDefinitionRequest$JobDefinitionName": "

The name of the model bias job definition. The name must be unique within an AWS Region in the AWS account.

", - "DescribeModelBiasJobDefinitionResponse$JobDefinitionName": "

The name of the bias job definition. The name must be unique within an AWS Region in the AWS account.

", - "DescribeModelExplainabilityJobDefinitionRequest$JobDefinitionName": "

The name of the model explainability job definition. The name must be unique within an AWS Region in the AWS account.

", - "DescribeModelExplainabilityJobDefinitionResponse$JobDefinitionName": "

The name of the explainability job definition. The name must be unique within an AWS Region in the AWS account.

", - "DescribeModelQualityJobDefinitionRequest$JobDefinitionName": "

The name of the model quality job. The name must be unique within an AWS Region in the AWS account.

", - "DescribeModelQualityJobDefinitionResponse$JobDefinitionName": "

The name of the quality job definition. The name must be unique within an AWS Region in the AWS account.

", + "DescribeModelBiasJobDefinitionRequest$JobDefinitionName": "

The name of the model bias job definition. The name must be unique within an Amazon Web Services Region in the Amazon Web Services account.

", + "DescribeModelBiasJobDefinitionResponse$JobDefinitionName": "

The name of the bias job definition. The name must be unique within an Amazon Web Services Region in the Amazon Web Services account.

", + "DescribeModelExplainabilityJobDefinitionRequest$JobDefinitionName": "

The name of the model explainability job definition. The name must be unique within an Amazon Web Services Region in the Amazon Web Services account.

", + "DescribeModelExplainabilityJobDefinitionResponse$JobDefinitionName": "

The name of the explainability job definition. The name must be unique within an Amazon Web Services Region in the Amazon Web Services account.

", + "DescribeModelQualityJobDefinitionRequest$JobDefinitionName": "

The name of the model quality job. The name must be unique within an Amazon Web Services Region in the Amazon Web Services account.

", + "DescribeModelQualityJobDefinitionResponse$JobDefinitionName": "

The name of the quality job definition. The name must be unique within an Amazon Web Services Region in the Amazon Web Services account.

", "ListMonitoringExecutionsRequest$MonitoringJobDefinitionName": "

Gets a list of the monitoring job runs of the specified monitoring job definitions.

", "ListMonitoringSchedulesRequest$MonitoringJobDefinitionName": "

Gets a list of the monitoring schedules for the specified monitoring job definition.

", "MonitoringExecutionSummary$MonitoringJobDefinitionName": "

The name of the monitoring job.

", @@ -6608,7 +6609,7 @@ "MonitoringScheduleName": { "base": null, "refs": { - "CreateMonitoringScheduleRequest$MonitoringScheduleName": "

The name of the monitoring schedule. The name must be unique within an AWS Region within an AWS account.

", + "CreateMonitoringScheduleRequest$MonitoringScheduleName": "

The name of the monitoring schedule. The name must be unique within an Amazon Web Services Region within an Amazon Web Services account.

", "DeleteMonitoringScheduleRequest$MonitoringScheduleName": "

The name of the monitoring schedule to delete.

", "DescribeMonitoringScheduleRequest$MonitoringScheduleName": "

Name of a previously created monitoring schedule.

", "DescribeMonitoringScheduleResponse$MonitoringScheduleName": "

Name of the monitoring schedule.

", @@ -6618,7 +6619,7 @@ "MonitoringScheduleSummary$MonitoringScheduleName": "

The name of the monitoring schedule.

", "StartMonitoringScheduleRequest$MonitoringScheduleName": "

The name of the schedule to start.

", "StopMonitoringScheduleRequest$MonitoringScheduleName": "

The name of the schedule to stop.

", - "UpdateMonitoringScheduleRequest$MonitoringScheduleName": "

The name of the monitoring schedule. The name must be unique within an AWS Region within an AWS account.

" + "UpdateMonitoringScheduleRequest$MonitoringScheduleName": "

The name of the monitoring schedule. The name must be unique within an Amazon Web Services Region within an Amazon Web Services account.

" } }, "MonitoringScheduleSortKey": { @@ -6713,7 +6714,7 @@ } }, "NeoVpcConfig": { - "base": null, + "base": "

The VpcConfig configuration object that specifies the VPC that you want the compilation jobs to connect to. For more information on controlling access to your Amazon S3 buckets used for compilation job, see Give Amazon SageMaker Compilation Jobs Access to Resources in Your Amazon VPC.

", "refs": { "CreateCompilationJobRequest$VpcConfig": "

A VpcConfig object that specifies the VPC that you want your compilation job to connect to. Control access to your models by configuring the VPC. For more information, see Protect Compilation Jobs by Using an Amazon Virtual Private Cloud.

", "DescribeCompilationJobResponse$VpcConfig": "

A VpcConfig object that specifies the VPC that you want your compilation job to connect to. Control access to your models by configuring the VPC. For more information, see Protect Compilation Jobs by Using an Amazon Virtual Private Cloud.

" @@ -6728,7 +6729,7 @@ "NeoVpcSecurityGroupIds": { "base": null, "refs": { - "NeoVpcConfig$SecurityGroupIds": null + "NeoVpcConfig$SecurityGroupIds": "

The VPC security group IDs. IDs have the form of sg-xxxxxxxx. Specify the security groups for the VPC that is specified in the Subnets field.

" } }, "NeoVpcSubnetId": { @@ -6740,7 +6741,7 @@ "NeoVpcSubnets": { "base": null, "refs": { - "NeoVpcConfig$Subnets": null + "NeoVpcConfig$Subnets": "

The IDs of the subnets in the VPC that you want to connect the compilation job to for accessing the model in Amazon S3.

" } }, "NestedFilters": { @@ -7098,10 +7099,10 @@ } }, "OfflineStoreConfig": { - "base": "

The configuration of an OfflineStore.

Provide an OfflineStoreConfig in a request to CreateFeatureGroup to create an OfflineStore.

To encrypt an OfflineStore using at rest data encryption, specify AWS Key Management Service (KMS) key ID, or KMSKeyId, in S3StorageConfig.

", + "base": "

The configuration of an OfflineStore.

Provide an OfflineStoreConfig in a request to CreateFeatureGroup to create an OfflineStore.

To encrypt an OfflineStore using at rest data encryption, specify Amazon Web Services Key Management Service (KMS) key ID, or KMSKeyId, in S3StorageConfig.

", "refs": { - "CreateFeatureGroupRequest$OfflineStoreConfig": "

Use this to configure an OfflineFeatureStore. This parameter allows you to specify:

To learn more about this parameter, see OfflineStoreConfig.

", - "DescribeFeatureGroupResponse$OfflineStoreConfig": "

The configuration of the OfflineStore, inducing the S3 location of the OfflineStore, AWS Glue or AWS Hive data catalogue configurations, and the security configuration.

", + "CreateFeatureGroupRequest$OfflineStoreConfig": "

Use this to configure an OfflineFeatureStore. This parameter allows you to specify:

To learn more about this parameter, see OfflineStoreConfig.

", + "DescribeFeatureGroupResponse$OfflineStoreConfig": "

The configuration of the OfflineStore, including the S3 location of the OfflineStore, Amazon Web Services Glue or Amazon Web Services Hive data catalogue configurations, and the security configuration.

", "FeatureGroup$OfflineStoreConfig": null } }, @@ -7157,9 +7158,9 @@ } }, "OnlineStoreConfig": { - "base": "

Use this to specify the AWS Key Management Service (KMS) Key ID, or KMSKeyId, for at rest data encryption. You can turn OnlineStore on or off by specifying the EnableOnlineStore flag at General Assembly; the default value is False.

", + "base": "

Use this to specify the Amazon Web Services Key Management Service (KMS) Key ID, or KMSKeyId, for at rest data encryption. You can turn OnlineStore on or off by specifying the EnableOnlineStore flag in CreateFeatureGroup; the default value is False.

", "refs": { - "CreateFeatureGroupRequest$OnlineStoreConfig": "

You can turn the OnlineStore on or off by specifying True for the EnableOnlineStore flag in OnlineStoreConfig; the default value is False.

You can also include an AWS KMS key ID (KMSKeyId) for at-rest encryption of the OnlineStore.

", + "CreateFeatureGroupRequest$OnlineStoreConfig": "

You can turn the OnlineStore on or off by specifying True for the EnableOnlineStore flag in OnlineStoreConfig; the default value is False.

You can also include an Amazon Web Services KMS key ID (KMSKeyId) for at-rest encryption of the OnlineStore.

", "DescribeFeatureGroupResponse$OnlineStoreConfig": "

The configuration for the OnlineStore.

", "FeatureGroup$OnlineStoreConfig": null } @@ -7634,10 +7635,10 @@ "ProcessingJobName": { "base": null, "refs": { - "CreateProcessingJobRequest$ProcessingJobName": "

The name of the processing job. The name must be unique within an AWS Region in the AWS account.

", + "CreateProcessingJobRequest$ProcessingJobName": "

The name of the processing job. The name must be unique within an Amazon Web Services Region in the Amazon Web Services account.

", "DataQualityBaselineConfig$BaseliningJobName": "

The name of the job that performs baselining for the data quality monitoring job.

", - "DescribeProcessingJobRequest$ProcessingJobName": "

The name of the processing job. The name must be unique within an AWS Region in the AWS account.

", - "DescribeProcessingJobResponse$ProcessingJobName": "

The name of the processing job. The name must be unique within an AWS Region in the AWS account.

", + "DescribeProcessingJobRequest$ProcessingJobName": "

The name of the processing job. The name must be unique within an Amazon Web Services Region in the Amazon Web Services account.

", + "DescribeProcessingJobResponse$ProcessingJobName": "

The name of the processing job. The name must be unique within an Amazon Web Services Region in the Amazon Web Services account.

", "ModelBiasBaselineConfig$BaseliningJobName": "

The name of the baseline model bias job.

", "ModelExplainabilityBaselineConfig$BaseliningJobName": "

The name of the baseline model explainability job.

", "ModelQualityBaselineConfig$BaseliningJobName": "

The name of the job that performs baselining for the monitoring job.

", @@ -7782,7 +7783,7 @@ "base": null, "refs": { "DescribeAlgorithmOutput$ProductId": "

The product identifier of the algorithm.

", - "ModelPackageContainerDefinition$ProductId": "

The AWS Marketplace product ID of the model package.

" + "ModelPackageContainerDefinition$ProductId": "

The Amazon Web Services Marketplace product ID of the model package.

" } }, "ProductListings": { @@ -7984,7 +7985,7 @@ } }, "ProvisioningParameter": { - "base": "

A key value pair used when you provision a project as a service catalog product. For information, see What is AWS Service Catalog.

", + "base": "

A key value pair used when you provision a project as a service catalog product. For information, see What is Amazon Web Services Service Catalog.

", "refs": { "ProvisioningParameters$member": null } @@ -8132,7 +8133,7 @@ "RepositoryCredentialsProviderArn": { "base": null, "refs": { - "RepositoryAuthConfig$RepositoryCredentialsProviderArn": "

The Amazon Resource Name (ARN) of an AWS Lambda function that provides credentials to authenticate to the private Docker registry where your model image is hosted. For information about how to create an AWS Lambda function, see Create a Lambda function with the console in the AWS Lambda Developer Guide.

" + "RepositoryAuthConfig$RepositoryCredentialsProviderArn": "

The Amazon Resource Name (ARN) of an Amazon Web Services Lambda function that provides credentials to authenticate to the private Docker registry where your model image is hosted. For information about how to create an Amazon Web Services Lambda function, see Create a Lambda function with the console in the Amazon Web Services Lambda Developer Guide.

" } }, "ResolvedAttributes": { @@ -8253,7 +8254,7 @@ "CreateAutoMLJobRequest$RoleArn": "

The ARN of the role that is used to access the data.

", "CreateCompilationJobRequest$RoleArn": "

The Amazon Resource Name (ARN) of an IAM role that enables Amazon SageMaker to perform tasks on your behalf.

During model compilation, Amazon SageMaker needs your permission to:

You grant permissions for all of these tasks to an IAM role. To pass this role to Amazon SageMaker, the caller of this API must have the iam:PassRole permission. For more information, see Amazon SageMaker Roles.

", "CreateDataQualityJobDefinitionRequest$RoleArn": "

The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume to perform tasks on your behalf.

", - "CreateDeviceFleetRequest$RoleArn": "

The Amazon Resource Name (ARN) that has access to AWS Internet of Things (IoT).

", + "CreateDeviceFleetRequest$RoleArn": "

The Amazon Resource Name (ARN) that has access to Amazon Web Services Internet of Things (IoT).

", "CreateEdgePackagingJobRequest$RoleArn": "

The Amazon Resource Name (ARN) of an IAM role that enables Amazon SageMaker to download and upload the model, and to contact SageMaker Neo.

", "CreateFeatureGroupRequest$RoleArn": "

The Amazon Resource Name (ARN) of the IAM execution role used to persist data into the OfflineStore if an OfflineStoreConfig is provided.

", "CreateFlowDefinitionRequest$RoleArn": "

The Amazon Resource Name (ARN) of the role needed to call other services on your behalf. For example, arn:aws:iam::1234567890:role/service-role/AmazonSageMaker-ExecutionRole-20180111T151298.

", @@ -8263,27 +8264,27 @@ "CreateModelExplainabilityJobDefinitionRequest$RoleArn": "

The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume to perform tasks on your behalf.

", "CreateModelInput$ExecutionRoleArn": "

The Amazon Resource Name (ARN) of the IAM role that Amazon SageMaker can assume to access model artifacts and docker image for deployment on ML compute instances or for batch transform jobs. Deploying on ML compute instances is part of model hosting. For more information, see Amazon SageMaker Roles.

To be able to pass this role to Amazon SageMaker, the caller of this API must have the iam:PassRole permission.

", "CreateModelQualityJobDefinitionRequest$RoleArn": "

The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume to perform tasks on your behalf.

", - "CreateNotebookInstanceInput$RoleArn": "

When you send any requests to AWS resources from the notebook instance, Amazon SageMaker assumes this role to perform tasks on your behalf. You must grant this role necessary permissions so Amazon SageMaker can perform these tasks. The policy must allow the Amazon SageMaker service principal (sagemaker.amazonaws.com) permissions to assume this role. For more information, see Amazon SageMaker Roles.

To be able to pass this role to Amazon SageMaker, the caller of this API must have the iam:PassRole permission.

", + "CreateNotebookInstanceInput$RoleArn": "

When you send any requests to Amazon Web Services resources from the notebook instance, Amazon SageMaker assumes this role to perform tasks on your behalf. You must grant this role necessary permissions so Amazon SageMaker can perform these tasks. The policy must allow the Amazon SageMaker service principal (sagemaker.amazonaws.com) permissions to assume this role. For more information, see Amazon SageMaker Roles.

To be able to pass this role to Amazon SageMaker, the caller of this API must have the iam:PassRole permission.

", "CreatePipelineRequest$RoleArn": "

The Amazon Resource Name (ARN) of the role used by the pipeline to access and create resources.

", "CreateProcessingJobRequest$RoleArn": "

The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume to perform tasks on your behalf.

", "CreateTrainingJobRequest$RoleArn": "

The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume to perform tasks on your behalf.

During model training, Amazon SageMaker needs your permission to read input data from an S3 bucket, download a Docker image that contains training code, write model artifacts to an S3 bucket, write logs to Amazon CloudWatch Logs, and publish metrics to Amazon CloudWatch. You grant permissions for all of these tasks to an IAM role. For more information, see Amazon SageMaker Roles.

To be able to pass this role to Amazon SageMaker, the caller of this API must have the iam:PassRole permission.

", - "DescribeAutoMLJobResponse$RoleArn": "

The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that has read permission to the input data location and write permission to the output data location in Amazon S3.

", + "DescribeAutoMLJobResponse$RoleArn": "

The Amazon Resource Name (ARN) of the Amazon Web Services Identity and Access Management (IAM) role that has read permission to the input data location and write permission to the output data location in Amazon S3.

", "DescribeCompilationJobResponse$RoleArn": "

The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker assumes to perform the model compilation job.

", "DescribeDataQualityJobDefinitionResponse$RoleArn": "

The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume to perform tasks on your behalf.

", - "DescribeDeviceFleetResponse$RoleArn": "

The Amazon Resource Name (ARN) that has access to AWS Internet of Things (IoT).

", + "DescribeDeviceFleetResponse$RoleArn": "

The Amazon Resource Name (ARN) that has access to Amazon Web Services Internet of Things (IoT).

", "DescribeEdgePackagingJobResponse$RoleArn": "

The Amazon Resource Name (ARN) of an IAM role that enables Amazon SageMaker to download and upload the model, and to contact Neo.

", "DescribeFeatureGroupResponse$RoleArn": "

The Amazon Resource Name (ARN) of the IAM execution role used to persist data into the OfflineStore if an OfflineStoreConfig is provided.

", - "DescribeFlowDefinitionResponse$RoleArn": "

The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) execution role for the flow definition.

", + "DescribeFlowDefinitionResponse$RoleArn": "

The Amazon Resource Name (ARN) of the Amazon Web Services Identity and Access Management (IAM) execution role for the flow definition.

", "DescribeImageResponse$RoleArn": "

The Amazon Resource Name (ARN) of the IAM role that enables Amazon SageMaker to perform tasks on your behalf.

", "DescribeLabelingJobResponse$RoleArn": "

The Amazon Resource Name (ARN) that Amazon SageMaker assumes to perform tasks on your behalf during data labeling.

", - "DescribeModelBiasJobDefinitionResponse$RoleArn": "

The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that has read permission to the input data location and write permission to the output data location in Amazon S3.

", - "DescribeModelExplainabilityJobDefinitionResponse$RoleArn": "

The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that has read permission to the input data location and write permission to the output data location in Amazon S3.

", + "DescribeModelBiasJobDefinitionResponse$RoleArn": "

The Amazon Resource Name (ARN) of the Amazon Web Services Identity and Access Management (IAM) role that has read permission to the input data location and write permission to the output data location in Amazon S3.

", + "DescribeModelExplainabilityJobDefinitionResponse$RoleArn": "

The Amazon Resource Name (ARN) of the Amazon Web Services Identity and Access Management (IAM) role that has read permission to the input data location and write permission to the output data location in Amazon S3.

", "DescribeModelOutput$ExecutionRoleArn": "

The Amazon Resource Name (ARN) of the IAM role that you specified for the model.

", "DescribeModelQualityJobDefinitionResponse$RoleArn": "

The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume to perform tasks on your behalf.

", "DescribeNotebookInstanceOutput$RoleArn": "

The Amazon Resource Name (ARN) of the IAM role associated with the instance.

", "DescribePipelineResponse$RoleArn": "

The Amazon Resource Name (ARN) that the pipeline uses to execute.

", "DescribeProcessingJobResponse$RoleArn": "

The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume to perform tasks on your behalf.

", - "DescribeTrainingJobResponse$RoleArn": "

The AWS Identity and Access Management (IAM) role configured for the training job.

", + "DescribeTrainingJobResponse$RoleArn": "

The Amazon Web Services Identity and Access Management (IAM) role configured for the training job.

", "FeatureGroup$RoleArn": "

The Amazon Resource Name (ARN) of the IAM execution role used to create the feature group.

", "HyperParameterTrainingJobDefinition$RoleArn": "

The Amazon Resource Name (ARN) of the IAM role associated with the training jobs that the tuning job launches.

", "ModelPackageValidationSpecification$ValidationRole": "

The IAM roles to be used for the validation of the model package.

", @@ -8293,7 +8294,7 @@ "ProcessingJob$RoleArn": "

The ARN of the role used to create the processing job.

", "RedshiftDatasetDefinition$ClusterRoleArn": "

The IAM role attached to your Redshift cluster that Amazon SageMaker uses to generate datasets.

", "RenderUiTemplateRequest$RoleArn": "

The Amazon Resource Name (ARN) that has access to the S3 objects that are used by the template.

", - "TrainingJob$RoleArn": "

The AWS Identity and Access Management (IAM) role configured for the training job.

", + "TrainingJob$RoleArn": "

The Amazon Web Services Identity and Access Management (IAM) role configured for the training job.

", "UpdateDeviceFleetRequest$RoleArn": "

The Amazon Resource Name (ARN) of the device.

", "UpdateImageRequest$RoleArn": "

The new Amazon Resource Name (ARN) for the IAM role that enables Amazon SageMaker to perform tasks on your behalf.

", "UpdateNotebookInstanceInput$RoleArn": "

The Amazon Resource Name (ARN) of the IAM role that Amazon SageMaker can assume to access the notebook instance. For more information, see Amazon SageMaker Roles.

To be able to pass this role to Amazon SageMaker, the caller of this API must have the iam:PassRole permission.

", @@ -8504,8 +8505,8 @@ "SecretArn": { "base": null, "refs": { - "GitConfig$SecretArn": "

The Amazon Resource Name (ARN) of the AWS Secrets Manager secret that contains the credentials used to access the git repository. The secret must have a staging label of AWSCURRENT and must be in the following format:

{\"username\": UserName, \"password\": Password}

", - "GitConfigForUpdate$SecretArn": "

The Amazon Resource Name (ARN) of the AWS Secrets Manager secret that contains the credentials used to access the git repository. The secret must have a staging label of AWSCURRENT and must be in the following format:

{\"username\": UserName, \"password\": Password}

" + "GitConfig$SecretArn": "

The Amazon Resource Name (ARN) of the Amazon Web Services Secrets Manager secret that contains the credentials used to access the git repository. The secret must have a staging label of AWSCURRENT and must be in the following format:

{\"username\": UserName, \"password\": Password}

", + "GitConfigForUpdate$SecretArn": "

The Amazon Resource Name (ARN) of the Amazon Web Services Secrets Manager secret that contains the credentials used to access the git repository. The secret must have a staging label of AWSCURRENT and must be in the following format:

{\"username\": UserName, \"password\": Password}

" } }, "SecurityGroupId": { @@ -8559,16 +8560,16 @@ } }, "ServiceCatalogProvisionedProductDetails": { - "base": "

Details of a provisioned service catalog product. For information about service catalog, see What is AWS Service Catalog.

", + "base": "

Details of a provisioned service catalog product. For information about service catalog, see What is Amazon Web Services Service Catalog.

", "refs": { "DescribeProjectOutput$ServiceCatalogProvisionedProductDetails": "

Information about a provisioned service catalog product.

" } }, "ServiceCatalogProvisioningDetails": { - "base": "

Details that you specify to provision a service catalog product. For information about service catalog, see .What is AWS Service Catalog.

", + "base": "

Details that you specify to provision a service catalog product. For information about service catalog, see What is Amazon Web Services Service Catalog.

", "refs": { - "CreateProjectInput$ServiceCatalogProvisioningDetails": "

The product ID and provisioning artifact ID to provision a service catalog. For information, see What is AWS Service Catalog.

", - "DescribeProjectOutput$ServiceCatalogProvisioningDetails": "

Information used to provision a service catalog product. For information, see What is AWS Service Catalog.

" + "CreateProjectInput$ServiceCatalogProvisioningDetails": "

The product ID and provisioning artifact ID to provision a service catalog. For information, see What is Amazon Web Services Service Catalog.

", + "DescribeProjectOutput$ServiceCatalogProvisioningDetails": "

Information used to provision a service catalog product. For information, see What is Amazon Web Services Service Catalog.

" } }, "SessionExpirationDurationInSeconds": { @@ -8709,7 +8710,7 @@ } }, "SourceAlgorithm": { - "base": "

Specifies an algorithm that was used to create the model package. The algorithm must be either an algorithm resource in your Amazon SageMaker account or an algorithm in AWS Marketplace that you are subscribed to.

", + "base": "

Specifies an algorithm that was used to create the model package. The algorithm must be either an algorithm resource in your Amazon SageMaker account or an algorithm in Amazon Web Services Marketplace that you are subscribed to.

", "refs": { "SourceAlgorithmList$member": null } @@ -8881,7 +8882,7 @@ "AlgorithmStatusItem$FailureReason": "

if the overall status is Failed, the reason for the failure.

", "DescribeEdgePackagingJobResponse$EdgePackagingJobStatusMessage": "

Returns a message describing the job status and error messages.

", "DescribeEdgePackagingJobResponse$ModelSignature": "

The signature document of files in the model artifact.

", - "EdgeOutputConfig$PresetDeploymentConfig": "

The configuration used to create deployment artifacts. Specify configuration options with a JSON string. The available configuration options for each type are:

", + "EdgeOutputConfig$PresetDeploymentConfig": "

The configuration used to create deployment artifacts. Specify configuration options with a JSON string. The available configuration options for each type are:

", "EdgePresetDeploymentOutput$StatusMessage": "

Returns a message describing the status of the deployed resource.

", "EndpointInput$FeaturesAttribute": "

The attributes of the input data that are the input features.

", "EndpointInput$InferenceAttribute": "

The attribute of the input data that represents the ground truth label.

", @@ -8947,7 +8948,7 @@ "CreateActionRequest$ActionType": "

The action type.

", "CreateArtifactRequest$ArtifactType": "

The artifact type.

", "CreateContextRequest$ContextType": "

The context type.

", - "CreateUserProfileRequest$SingleSignOnUserValue": "

The username of the associated AWS Single Sign-On User for this UserProfile. If the Domain's AuthMode is SSO, this field is required, and must match a valid username of a user in your directory. If the Domain's AuthMode is not SSO, this field cannot be specified.

", + "CreateUserProfileRequest$SingleSignOnUserValue": "

The username of the associated Amazon Web Services Single Sign-On User for this UserProfile. If the Domain's AuthMode is SSO, this field is required, and must match a valid username of a user in your directory. If the Domain's AuthMode is not SSO, this field cannot be specified.

", "DescribeActionResponse$ActionType": "

The type of the action.

", "DescribeArtifactResponse$ArtifactType": "

The type of the artifact.

", "DescribeContextResponse$ContextType": "

The type of the context.

", @@ -9028,7 +9029,7 @@ } }, "Tag": { - "base": "

A tag object that consists of a key and an optional value, used to manage metadata for Amazon SageMaker AWS resources.

You can add tags to notebook instances, training jobs, hyperparameter tuning jobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations, and endpoints. For more information on adding tags to Amazon SageMaker resources, see AddTags.

For more information on adding metadata to your AWS resources with tagging, see Tagging AWS resources. For advice on best practices for managing AWS resources with tagging, see Tagging Best Practices: Implement an Effective AWS Resource Tagging Strategy.

", + "base": "

A tag object that consists of a key and an optional value, used to manage metadata for Amazon SageMaker Amazon Web Services resources.

You can add tags to notebook instances, training jobs, hyperparameter tuning jobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations, and endpoints. For more information on adding tags to Amazon SageMaker resources, see AddTags.

For more information on adding metadata to your Amazon Web Services resources with tagging, see Tagging Amazon Web Services resources. For advice on best practices for managing Amazon Web Services resources with tagging, see Tagging Best Practices: Implement an Effective Amazon Web Services Resource Tagging Strategy.

", "refs": { "TagList$member": null } @@ -9049,60 +9050,60 @@ "TagList": { "base": null, "refs": { - "AddTagsInput$Tags": "

An array of key-value pairs. You can use tags to categorize your AWS resources in different ways, for example, by purpose, owner, or environment. For more information, see Tagging AWS Resources.

", + "AddTagsInput$Tags": "

An array of key-value pairs. You can use tags to categorize your Amazon Web Services resources in different ways, for example, by purpose, owner, or environment. For more information, see Tagging Amazon Web Services Resources.

", "AddTagsOutput$Tags": "

A list of tags associated with the Amazon SageMaker resource.

", "CreateActionRequest$Tags": "

A list of tags to apply to the action.

", - "CreateAlgorithmInput$Tags": "

An array of key-value pairs. You can use tags to categorize your AWS resources in different ways, for example, by purpose, owner, or environment. For more information, see Tagging AWS Resources.

", + "CreateAlgorithmInput$Tags": "

An array of key-value pairs. You can use tags to categorize your Amazon Web Services resources in different ways, for example, by purpose, owner, or environment. For more information, see Tagging Amazon Web Services Resources.

", "CreateAppImageConfigRequest$Tags": "

A list of tags to apply to the AppImageConfig.

", "CreateAppRequest$Tags": "

Each tag consists of a key and an optional value. Tag keys must be unique per resource.

", "CreateArtifactRequest$Tags": "

A list of tags to apply to the artifact.

", "CreateAutoMLJobRequest$Tags": "

Each tag consists of a key and an optional value. Tag keys must be unique per resource.

", - "CreateCodeRepositoryInput$Tags": "

An array of key-value pairs. You can use tags to categorize your AWS resources in different ways, for example, by purpose, owner, or environment. For more information, see Tagging AWS Resources.

", - "CreateCompilationJobRequest$Tags": "

An array of key-value pairs. You can use tags to categorize your AWS resources in different ways, for example, by purpose, owner, or environment. For more information, see Tagging AWS Resources.

", + "CreateCodeRepositoryInput$Tags": "

An array of key-value pairs. You can use tags to categorize your Amazon Web Services resources in different ways, for example, by purpose, owner, or environment. For more information, see Tagging Amazon Web Services Resources.

", + "CreateCompilationJobRequest$Tags": "

An array of key-value pairs. You can use tags to categorize your Amazon Web Services resources in different ways, for example, by purpose, owner, or environment. For more information, see Tagging Amazon Web Services Resources.

", "CreateContextRequest$Tags": "

A list of tags to apply to the context.

", - "CreateDataQualityJobDefinitionRequest$Tags": "

(Optional) An array of key-value pairs. For more information, see Using Cost Allocation Tags in the AWS Billing and Cost Management User Guide.

", + "CreateDataQualityJobDefinitionRequest$Tags": "

(Optional) An array of key-value pairs. For more information, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User Guide.

", "CreateDeviceFleetRequest$Tags": "

Creates tags for the specified fleet.

", "CreateDomainRequest$Tags": "

Tags to associate with the Domain. Each tag consists of a key and an optional value. Tag keys must be unique per resource. Tags are searchable using the Search API.

Tags that you specify for the Domain are also added to all Apps that the Domain launches.

", "CreateEdgePackagingJobRequest$Tags": "

Creates tags for the packaging job.

", - "CreateEndpointConfigInput$Tags": "

An array of key-value pairs. You can use tags to categorize your AWS resources in different ways, for example, by purpose, owner, or environment. For more information, see Tagging AWS Resources.

", - "CreateEndpointInput$Tags": "

An array of key-value pairs. You can use tags to categorize your AWS resources in different ways, for example, by purpose, owner, or environment. For more information, see Tagging AWS Resources.

", + "CreateEndpointConfigInput$Tags": "

An array of key-value pairs. You can use tags to categorize your Amazon Web Services resources in different ways, for example, by purpose, owner, or environment. For more information, see Tagging Amazon Web Services Resources.

", + "CreateEndpointInput$Tags": "

An array of key-value pairs. You can use tags to categorize your Amazon Web Services resources in different ways, for example, by purpose, owner, or environment. For more information, see Tagging Amazon Web Services Resources.

", "CreateExperimentRequest$Tags": "

A list of tags to associate with the experiment. You can use Search API to search on the tags.

", "CreateFeatureGroupRequest$Tags": "

Tags used to identify Features in each FeatureGroup.

", "CreateFlowDefinitionRequest$Tags": "

An array of key-value pairs that contain metadata to help you categorize and organize a flow definition. Each tag consists of a key and a value, both of which you define.

", "CreateHumanTaskUiRequest$Tags": "

An array of key-value pairs that contain metadata to help you categorize and organize a human review workflow user interface. Each tag consists of a key and a value, both of which you define.

", - "CreateHyperParameterTuningJobRequest$Tags": "

An array of key-value pairs. You can use tags to categorize your AWS resources in different ways, for example, by purpose, owner, or environment. For more information, see Tagging AWS Resources.

Tags that you specify for the tuning job are also added to all training jobs that the tuning job launches.

", + "CreateHyperParameterTuningJobRequest$Tags": "

An array of key-value pairs. You can use tags to categorize your Amazon Web Services resources in different ways, for example, by purpose, owner, or environment. For more information, see Tagging Amazon Web Services Resources.

Tags that you specify for the tuning job are also added to all training jobs that the tuning job launches.

", "CreateImageRequest$Tags": "

A list of tags to apply to the image.

", - "CreateLabelingJobRequest$Tags": "

An array of key/value pairs. For more information, see Using Cost Allocation Tags in the AWS Billing and Cost Management User Guide.

", - "CreateModelBiasJobDefinitionRequest$Tags": "

(Optional) An array of key-value pairs. For more information, see Using Cost Allocation Tags in the AWS Billing and Cost Management User Guide.

", - "CreateModelExplainabilityJobDefinitionRequest$Tags": "

(Optional) An array of key-value pairs. For more information, see Using Cost Allocation Tags in the AWS Billing and Cost Management User Guide.

", - "CreateModelInput$Tags": "

An array of key-value pairs. You can use tags to categorize your AWS resources in different ways, for example, by purpose, owner, or environment. For more information, see Tagging AWS Resources.

", - "CreateModelPackageGroupInput$Tags": "

A list of key value pairs associated with the model group. For more information, see Tagging AWS resources in the AWS General Reference Guide.

", - "CreateModelPackageInput$Tags": "

A list of key value pairs associated with the model. For more information, see Tagging AWS resources in the AWS General Reference Guide.

", - "CreateModelQualityJobDefinitionRequest$Tags": "

(Optional) An array of key-value pairs. For more information, see Using Cost Allocation Tags in the AWS Billing and Cost Management User Guide.

", - "CreateMonitoringScheduleRequest$Tags": "

(Optional) An array of key-value pairs. For more information, see Using Cost Allocation Tags in the AWS Billing and Cost Management User Guide.

", - "CreateNotebookInstanceInput$Tags": "

An array of key-value pairs. You can use tags to categorize your AWS resources in different ways, for example, by purpose, owner, or environment. For more information, see Tagging AWS Resources.

", + "CreateLabelingJobRequest$Tags": "

An array of key/value pairs. For more information, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User Guide.

", + "CreateModelBiasJobDefinitionRequest$Tags": "

(Optional) An array of key-value pairs. For more information, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User Guide.

", + "CreateModelExplainabilityJobDefinitionRequest$Tags": "

(Optional) An array of key-value pairs. For more information, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User Guide.

", + "CreateModelInput$Tags": "

An array of key-value pairs. You can use tags to categorize your Amazon Web Services resources in different ways, for example, by purpose, owner, or environment. For more information, see Tagging Amazon Web Services Resources.

", + "CreateModelPackageGroupInput$Tags": "

A list of key value pairs associated with the model group. For more information, see Tagging Amazon Web Services resources in the Amazon Web Services General Reference Guide.

", + "CreateModelPackageInput$Tags": "

A list of key value pairs associated with the model. For more information, see Tagging Amazon Web Services resources in the Amazon Web Services General Reference Guide.

", + "CreateModelQualityJobDefinitionRequest$Tags": "

(Optional) An array of key-value pairs. For more information, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User Guide.

", + "CreateMonitoringScheduleRequest$Tags": "

(Optional) An array of key-value pairs. For more information, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User Guide.

", + "CreateNotebookInstanceInput$Tags": "

An array of key-value pairs. You can use tags to categorize your Amazon Web Services resources in different ways, for example, by purpose, owner, or environment. For more information, see Tagging Amazon Web Services Resources.

", "CreatePipelineRequest$Tags": "

A list of tags to apply to the created pipeline.

", - "CreateProcessingJobRequest$Tags": "

(Optional) An array of key-value pairs. For more information, see Using Cost Allocation Tags in the AWS Billing and Cost Management User Guide.

", - "CreateProjectInput$Tags": "

An array of key-value pairs that you want to use to organize and track your AWS resource costs. For more information, see Tagging AWS resources in the AWS General Reference Guide.

", - "CreateTrainingJobRequest$Tags": "

An array of key-value pairs. You can use tags to categorize your AWS resources in different ways, for example, by purpose, owner, or environment. For more information, see Tagging AWS Resources.

", - "CreateTransformJobRequest$Tags": "

(Optional) An array of key-value pairs. For more information, see Using Cost Allocation Tags in the AWS Billing and Cost Management User Guide.

", + "CreateProcessingJobRequest$Tags": "

(Optional) An array of key-value pairs. For more information, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User Guide.

", + "CreateProjectInput$Tags": "

An array of key-value pairs that you want to use to organize and track your Amazon Web Services resource costs. For more information, see Tagging Amazon Web Services resources in the Amazon Web Services General Reference Guide.

", + "CreateTrainingJobRequest$Tags": "

An array of key-value pairs. You can use tags to categorize your Amazon Web Services resources in different ways, for example, by purpose, owner, or environment. For more information, see Tagging Amazon Web Services Resources.

", + "CreateTransformJobRequest$Tags": "

(Optional) An array of key-value pairs. For more information, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User Guide.

", "CreateTrialComponentRequest$Tags": "

A list of tags to associate with the component. You can use Search API to search on the tags.

", "CreateTrialRequest$Tags": "

A list of tags to associate with the trial. You can use Search API to search on the tags.

", "CreateUserProfileRequest$Tags": "

Each tag consists of a key and an optional value. Tag keys must be unique per resource.

Tags that you specify for the User Profile are also added to all Apps that the User Profile launches.

", "CreateWorkforceRequest$Tags": "

An array of key-value pairs that contain metadata to help you categorize and organize our workforce. Each tag consists of a key and a value, both of which you define.

", - "CreateWorkteamRequest$Tags": "

An array of key-value pairs.

For more information, see Resource Tag and Using Cost Allocation Tags in the AWS Billing and Cost Management User Guide.

", - "DescribeLabelingJobResponse$Tags": "

An array of key-value pairs. You can use tags to categorize your AWS resources in different ways, for example, by purpose, owner, or environment. For more information, see Tagging AWS Resources.

", - "Endpoint$Tags": "

A list of the tags associated with the endpoint. For more information, see Tagging AWS resources in the AWS General Reference Guide.

", + "CreateWorkteamRequest$Tags": "

An array of key-value pairs.

For more information, see Resource Tag and Using Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User Guide.

", + "DescribeLabelingJobResponse$Tags": "

An array of key-value pairs. You can use tags to categorize your Amazon Web Services resources in different ways, for example, by purpose, owner, or environment. For more information, see Tagging Amazon Web Services Resources.

", + "Endpoint$Tags": "

A list of the tags associated with the endpoint. For more information, see Tagging Amazon Web Services resources in the Amazon Web Services General Reference Guide.

", "Experiment$Tags": "

The list of tags that are associated with the experiment. You can use Search API to search on the tags.

", "FeatureGroup$Tags": "

Tags used to define a FeatureGroup.

", "ListTagsOutput$Tags": "

An array of Tag objects, each with a tag key and a value.

", - "ModelPackage$Tags": "

A list of the tags associated with the model package. For more information, see Tagging AWS resources in the AWS General Reference Guide.

", - "ModelPackageGroup$Tags": "

A list of the tags associated with the model group. For more information, see Tagging AWS resources in the AWS General Reference Guide.

", - "MonitoringSchedule$Tags": "

A list of the tags associated with the monitoring schedlue. For more information, see Tagging AWS resources in the AWS General Reference Guide.

", + "ModelPackage$Tags": "

A list of the tags associated with the model package. For more information, see Tagging Amazon Web Services resources in the Amazon Web Services General Reference Guide.

", + "ModelPackageGroup$Tags": "

A list of the tags associated with the model group. For more information, see Tagging Amazon Web Services resources in the Amazon Web Services General Reference Guide.

", + "MonitoringSchedule$Tags": "

A list of the tags associated with the monitoring schedule. For more information, see Tagging Amazon Web Services resources in the Amazon Web Services General Reference Guide.

", "Pipeline$Tags": "

A list of tags that apply to the pipeline.

", - "ProcessingJob$Tags": "

An array of key-value pairs. For more information, see Using Cost Allocation Tags in the AWS Billing and Cost Management User Guide.

", + "ProcessingJob$Tags": "

An array of key-value pairs. For more information, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User Guide.

", "RegisterDevicesRequest$Tags": "

The tags associated with devices.

", - "TrainingJob$Tags": "

An array of key-value pairs. You can use tags to categorize your AWS resources in different ways, for example, by purpose, owner, or environment. For more information, see Tagging AWS Resources.

", + "TrainingJob$Tags": "

An array of key-value pairs. You can use tags to categorize your Amazon Web Services resources in different ways, for example, by purpose, owner, or environment. For more information, see Tagging Amazon Web Services Resources.

", "TransformJob$Tags": "

A list of tags associated with the transform job.

", "Trial$Tags": "

The list of tags that are associated with the trial. You can use Search API to search on the tags.

", "TrialComponent$Tags": "

The list of tags that are associated with the component. You can use Search API to search on the tags.

" @@ -9163,7 +9164,7 @@ "TaskAvailabilityLifetimeInSeconds": { "base": null, "refs": { - "HumanTaskConfig$TaskAvailabilityLifetimeInSeconds": "

The length of time that a task remains available for labeling by human workers. The default and maximum values for this parameter depend on the type of workforce you use.

" + "HumanTaskConfig$TaskAvailabilityLifetimeInSeconds": "

The length of time that a task remains available for labeling by human workers. The default and maximum values for this parameter depend on the type of workforce you use.

" } }, "TaskCount": { @@ -9202,7 +9203,7 @@ "TaskTimeLimitInSeconds": { "base": null, "refs": { - "HumanTaskConfig$TaskTimeLimitInSeconds": "

The amount of time that a worker has to complete a task.

If you create a custom labeling job, the maximum value for this parameter is 8 hours (28,800 seconds).

If you create a labeling job using a built-in task type the maximum for this parameter depends on the task type you use:

" + "HumanTaskConfig$TaskTimeLimitInSeconds": "

The amount of time that a worker has to complete a task.

If you create a custom labeling job, the maximum value for this parameter is 8 hours (28,800 seconds).

If you create a labeling job using a built-in task type the maximum for this parameter depends on the task type you use:

" } }, "TaskTitle": { @@ -9258,9 +9259,9 @@ "ThingName": { "base": null, "refs": { - "DescribeDeviceResponse$IotThingName": "

The AWS Internet of Things (IoT) object thing name associated with the device.

", - "Device$IotThingName": "

AWS Internet of Things (IoT) object name.

", - "DeviceSummary$IotThingName": "

The AWS Internet of Things (IoT) object thing name associated with the device..

" + "DescribeDeviceResponse$IotThingName": "

The Amazon Web Services Internet of Things (IoT) object thing name associated with the device.

", + "Device$IotThingName": "

Amazon Web Services Internet of Things (IoT) object name.

", + "DeviceSummary$IotThingName": "

The Amazon Web Services Internet of Things (IoT) object thing name associated with the device.

" } }, "Timestamp": { @@ -9654,7 +9655,7 @@ "TrainingJobName": { "base": null, "refs": { - "CreateTrainingJobRequest$TrainingJobName": "

The name of the training job. The name must be unique within an AWS Region in an AWS account.

", + "CreateTrainingJobRequest$TrainingJobName": "

The name of the training job. The name must be unique within an Amazon Web Services Region in an Amazon Web Services account.

", "DescribeTrainingJobRequest$TrainingJobName": "

The name of the training job.

", "DescribeTrainingJobResponse$TrainingJobName": "

Name of the model training job.

", "HyperParameterTrainingJobSummary$TrainingJobName": "

The name of the training job.

", @@ -9811,7 +9812,7 @@ "TransformJobName": { "base": null, "refs": { - "CreateTransformJobRequest$TransformJobName": "

The name of the transform job. The name must be unique within an AWS Region in an AWS account.

", + "CreateTransformJobRequest$TransformJobName": "

The name of the transform job. The name must be unique within an Amazon Web Services Region in an Amazon Web Services account.

", "DescribeTransformJobRequest$TransformJobName": "

The name of the transform job that you want to view details of.

", "DescribeTransformJobResponse$TransformJobName": "

The name of the transform job.

", "StopTransformJobRequest$TransformJobName": "

The name of the transform job to stop.

", @@ -10335,7 +10336,7 @@ "base": null, "refs": { "AutoMLContainerDefinition$ModelDataUrl": "

The location of the model artifacts. For more information, see .

", - "ContainerDefinition$ModelDataUrl": "

The S3 path where the model artifacts, which result from model training, are stored. This path must point to a single gzip compressed tar archive (.tar.gz suffix). The S3 path is required for Amazon SageMaker built-in algorithms, but not if you use your own algorithms. For more information on built-in algorithms, see Common Parameters.

The model artifacts must be in an S3 bucket that is in the same region as the model or endpoint you are creating.

If you provide a value for this parameter, Amazon SageMaker uses AWS Security Token Service to download model artifacts from the S3 path you provide. AWS STS is activated in your IAM user account by default. If you previously deactivated AWS STS for a region, you need to reactivate AWS STS for that region. For more information, see Activating and Deactivating AWS STS in an AWS Region in the AWS Identity and Access Management User Guide.

If you use a built-in algorithm to create a model, Amazon SageMaker requires that you provide a S3 path to the model artifacts in ModelDataUrl.

", + "ContainerDefinition$ModelDataUrl": "

The S3 path where the model artifacts, which result from model training, are stored. This path must point to a single gzip compressed tar archive (.tar.gz suffix). The S3 path is required for Amazon SageMaker built-in algorithms, but not if you use your own algorithms. For more information on built-in algorithms, see Common Parameters.

The model artifacts must be in an S3 bucket that is in the same region as the model or endpoint you are creating.

If you provide a value for this parameter, Amazon SageMaker uses Amazon Web Services Security Token Service to download model artifacts from the S3 path you provide. Amazon Web Services STS is activated in your IAM user account by default. If you previously deactivated Amazon Web Services STS for a region, you need to reactivate Amazon Web Services STS for that region. For more information, see Activating and Deactivating Amazon Web Services STS in an Amazon Web Services Region in the Amazon Web Services Identity and Access Management User Guide.

If you use a built-in algorithm to create a model, Amazon SageMaker requires that you provide a S3 path to the model artifacts in ModelDataUrl.

", "ModelPackageContainerDefinition$ModelDataUrl": "

The Amazon S3 path where the model artifacts, which result from model training, are stored. This path must point to a single gzip compressed tar archive (.tar.gz suffix).

The model artifacts must be in an S3 bucket that is in the same region as the model package.

", "SourceAlgorithm$ModelDataUrl": "

The Amazon S3 path where the model artifacts, which result from model training, are stored. This path must point to a single gzip compressed tar archive (.tar.gz suffix).

The model artifacts must be in an S3 bucket that is in the same region as the algorithm.

" } @@ -10528,10 +10529,10 @@ } }, "Workforce": { - "base": "

A single private workforce, which is automatically created when you create your first private work team. You can create one private work force in each AWS Region. By default, any workforce-related API operation used in a specific region will apply to the workforce created in that region. To learn how to create a private workforce, see Create a Private Workforce.

", + "base": "

A single private workforce, which is automatically created when you create your first private work team. You can create one private work force in each Amazon Web Services Region. By default, any workforce-related API operation used in a specific region will apply to the workforce created in that region. To learn how to create a private workforce, see Create a Private Workforce.

", "refs": { - "DescribeWorkforceResponse$Workforce": "

A single private workforce, which is automatically created when you create your first private work team. You can create one private work force in each AWS Region. By default, any workforce-related API operation used in a specific region will apply to the workforce created in that region. To learn how to create a private workforce, see Create a Private Workforce.

", - "UpdateWorkforceResponse$Workforce": "

A single private workforce. You can create one private work force in each AWS Region. By default, any workforce-related API operation used in a specific region will apply to the workforce created in that region. To learn how to create a private workforce, see Create a Private Workforce.

", + "DescribeWorkforceResponse$Workforce": "

A single private workforce, which is automatically created when you create your first private work team. You can create one private work force in each Amazon Web Services Region. By default, any workforce-related API operation used in a specific region will apply to the workforce created in that region. To learn how to create a private workforce, see Create a Private Workforce.

", + "UpdateWorkforceResponse$Workforce": "

A single private workforce. You can create one private work force in each Amazon Web Services Region. By default, any workforce-related API operation used in a specific region will apply to the workforce created in that region. To learn how to create a private workforce, see Create a Private Workforce.

", "Workforces$member": null } }, diff --git a/models/apis/sqs/2012-11-05/docs-2.json b/models/apis/sqs/2012-11-05/docs-2.json index 2bad6866bb2..6b0db236c09 100644 --- a/models/apis/sqs/2012-11-05/docs-2.json +++ b/models/apis/sqs/2012-11-05/docs-2.json @@ -1,39 +1,39 @@ { "version": "2.0", - "service": "

Welcome to the Amazon Simple Queue Service API Reference.

Amazon Simple Queue Service (Amazon SQS) is a reliable, highly-scalable hosted queue for storing messages as they travel between applications or microservices. Amazon SQS moves data between distributed application components and helps you decouple these components.

For information on the permissions you need to use this API, see Identity and access management in the Amazon Simple Queue Service Developer Guide.

You can use AWS SDKs to access Amazon SQS using your favorite programming language. The SDKs perform tasks such as the following automatically:

Additional information

", + "service": "

Welcome to the Amazon SQS API Reference.

Amazon SQS is a reliable, highly-scalable hosted queue for storing messages as they travel between applications or microservices. Amazon SQS moves data between distributed application components and helps you decouple these components.

For information on the permissions you need to use this API, see Identity and access management in the Amazon SQS Developer Guide.

You can use Amazon Web Services SDKs to access Amazon SQS using your favorite programming language. The SDKs perform tasks such as the following automatically:

Additional information

", "operations": { - "AddPermission": "

Adds a permission to a queue for a specific principal. This allows sharing access to the queue.

When you create a queue, you have full control access rights for the queue. Only you, the owner of the queue, can grant or deny permissions to the queue. For more information about these permissions, see Allow Developers to Write Messages to a Shared Queue in the Amazon Simple Queue Service Developer Guide.

Some actions take lists of parameters. These lists are specified using the param.n notation. Values of n are integers starting from 1. For example, a parameter list with two elements looks like this:

&AttributeName.1=first

&AttributeName.2=second

Cross-account permissions don't apply to this action. For more information, see Grant cross-account permissions to a role and a user name in the Amazon Simple Queue Service Developer Guide.

", - "ChangeMessageVisibility": "

Changes the visibility timeout of a specified message in a queue to a new value. The default visibility timeout for a message is 30 seconds. The minimum is 0 seconds. The maximum is 12 hours. For more information, see Visibility Timeout in the Amazon Simple Queue Service Developer Guide.

For example, you have a message with a visibility timeout of 5 minutes. After 3 minutes, you call ChangeMessageVisibility with a timeout of 10 minutes. You can continue to call ChangeMessageVisibility to extend the visibility timeout to the maximum allowed time. If you try to extend the visibility timeout beyond the maximum, your request is rejected.

An Amazon SQS message has three basic states:

  1. Sent to a queue by a producer.

  2. Received from the queue by a consumer.

  3. Deleted from the queue.

A message is considered to be stored after it is sent to a queue by a producer, but not yet received from the queue by a consumer (that is, between states 1 and 2). There is no limit to the number of stored messages. A message is considered to be in flight after it is received from a queue by a consumer, but not yet deleted from the queue (that is, between states 2 and 3). There is a limit to the number of inflight messages.

Limits that apply to inflight messages are unrelated to the unlimited number of stored messages.

For most standard queues (depending on queue traffic and message backlog), there can be a maximum of approximately 120,000 inflight messages (received from a queue by a consumer, but not yet deleted from the queue). If you reach this limit, Amazon SQS returns the OverLimit error message. To avoid reaching the limit, you should delete messages from the queue after they're processed. You can also increase the number of queues you use to process your messages. To request a limit increase, file a support request.

For FIFO queues, there can be a maximum of 20,000 inflight messages (received from a queue by a consumer, but not yet deleted from the queue). If you reach this limit, Amazon SQS returns no error messages.

If you attempt to set the VisibilityTimeout to a value greater than the maximum time left, Amazon SQS returns an error. Amazon SQS doesn't automatically recalculate and increase the timeout to the maximum remaining time.

Unlike with a queue, when you change the visibility timeout for a specific message the timeout value is applied immediately but isn't saved in memory for that message. If you don't delete a message after it is received, the visibility timeout for the message reverts to the original timeout value (not to the value you set using the ChangeMessageVisibility action) the next time the message is received.

", + "AddPermission": "

Adds a permission to a queue for a specific principal. This allows sharing access to the queue.

When you create a queue, you have full control access rights for the queue. Only you, the owner of the queue, can grant or deny permissions to the queue. For more information about these permissions, see Allow Developers to Write Messages to a Shared Queue in the Amazon SQS Developer Guide.

Some actions take lists of parameters. These lists are specified using the param.n notation. Values of n are integers starting from 1. For example, a parameter list with two elements looks like this:

&AttributeName.1=first

&AttributeName.2=second

Cross-account permissions don't apply to this action. For more information, see Grant cross-account permissions to a role and a user name in the Amazon SQS Developer Guide.

", + "ChangeMessageVisibility": "

Changes the visibility timeout of a specified message in a queue to a new value. The default visibility timeout for a message is 30 seconds. The minimum is 0 seconds. The maximum is 12 hours. For more information, see Visibility Timeout in the Amazon SQS Developer Guide.

For example, you have a message with a visibility timeout of 5 minutes. After 3 minutes, you call ChangeMessageVisibility with a timeout of 10 minutes. You can continue to call ChangeMessageVisibility to extend the visibility timeout to the maximum allowed time. If you try to extend the visibility timeout beyond the maximum, your request is rejected.

An Amazon SQS message has three basic states:

  1. Sent to a queue by a producer.

  2. Received from the queue by a consumer.

  3. Deleted from the queue.

A message is considered to be stored after it is sent to a queue by a producer, but not yet received from the queue by a consumer (that is, between states 1 and 2). There is no limit to the number of stored messages. A message is considered to be in flight after it is received from a queue by a consumer, but not yet deleted from the queue (that is, between states 2 and 3). There is a limit to the number of inflight messages.

Limits that apply to inflight messages are unrelated to the unlimited number of stored messages.

For most standard queues (depending on queue traffic and message backlog), there can be a maximum of approximately 120,000 inflight messages (received from a queue by a consumer, but not yet deleted from the queue). If you reach this limit, Amazon SQS returns the OverLimit error message. To avoid reaching the limit, you should delete messages from the queue after they're processed. You can also increase the number of queues you use to process your messages. To request a limit increase, file a support request.

For FIFO queues, there can be a maximum of 20,000 inflight messages (received from a queue by a consumer, but not yet deleted from the queue). If you reach this limit, Amazon SQS returns no error messages.

If you attempt to set the VisibilityTimeout to a value greater than the maximum time left, Amazon SQS returns an error. Amazon SQS doesn't automatically recalculate and increase the timeout to the maximum remaining time.

Unlike with a queue, when you change the visibility timeout for a specific message the timeout value is applied immediately but isn't saved in memory for that message. If you don't delete a message after it is received, the visibility timeout for the message reverts to the original timeout value (not to the value you set using the ChangeMessageVisibility action) the next time the message is received.

", "ChangeMessageVisibilityBatch": "

Changes the visibility timeout of multiple messages. This is a batch version of ChangeMessageVisibility. The result of the action on each message is reported individually in the response. You can send up to 10 ChangeMessageVisibility requests with each ChangeMessageVisibilityBatch action.

Because the batch request can result in a combination of successful and unsuccessful actions, you should check for batch errors even when the call returns an HTTP status code of 200.

Some actions take lists of parameters. These lists are specified using the param.n notation. Values of n are integers starting from 1. For example, a parameter list with two elements looks like this:

&AttributeName.1=first

&AttributeName.2=second

", - "CreateQueue": "

Creates a new standard or FIFO queue. You can pass one or more attributes in the request. Keep the following in mind:

To successfully create a new queue, you must provide a queue name that adheres to the limits related to queues and is unique within the scope of your queues.

After you create a queue, you must wait at least one second after the queue is created to be able to use the queue.

To get the queue URL, use the GetQueueUrl action. GetQueueUrl requires only the QueueName parameter. be aware of existing queue names:

Some actions take lists of parameters. These lists are specified using the param.n notation. Values of n are integers starting from 1. For example, a parameter list with two elements looks like this:

&AttributeName.1=first

&AttributeName.2=second

Cross-account permissions don't apply to this action. For more information, see Grant cross-account permissions to a role and a user name in the Amazon Simple Queue Service Developer Guide.

", + "CreateQueue": "

Creates a new standard or FIFO queue. You can pass one or more attributes in the request. Keep the following in mind:

To successfully create a new queue, you must provide a queue name that adheres to the limits related to queues and is unique within the scope of your queues.

After you create a queue, you must wait at least one second after the queue is created to be able to use the queue.

To get the queue URL, use the GetQueueUrl action. GetQueueUrl requires only the QueueName parameter. Be aware of existing queue names:

Some actions take lists of parameters. These lists are specified using the param.n notation. Values of n are integers starting from 1. For example, a parameter list with two elements looks like this:

&AttributeName.1=first

&AttributeName.2=second

Cross-account permissions don't apply to this action. For more information, see Grant cross-account permissions to a role and a user name in the Amazon SQS Developer Guide.

", "DeleteMessage": "

Deletes the specified message from the specified queue. To select the message to delete, use the ReceiptHandle of the message (not the MessageId which you receive when you send the message). Amazon SQS can delete a message from a queue even if a visibility timeout setting causes the message to be locked by another consumer. Amazon SQS automatically deletes messages left in a queue longer than the retention period configured for the queue.

The ReceiptHandle is associated with a specific instance of receiving a message. If you receive a message more than once, the ReceiptHandle is different each time you receive a message. When you use the DeleteMessage action, you must provide the most recently received ReceiptHandle for the message (otherwise, the request succeeds, but the message might not be deleted).

For standard queues, it is possible to receive a message even after you delete it. This might happen on rare occasions if one of the servers which stores a copy of the message is unavailable when you send the request to delete the message. The copy remains on the server and might be returned to you during a subsequent receive request. You should ensure that your application is idempotent, so that receiving a message more than once does not cause issues.

", "DeleteMessageBatch": "

Deletes up to ten messages from the specified queue. This is a batch version of DeleteMessage. The result of the action on each message is reported individually in the response.

Because the batch request can result in a combination of successful and unsuccessful actions, you should check for batch errors even when the call returns an HTTP status code of 200.

Some actions take lists of parameters. These lists are specified using the param.n notation. Values of n are integers starting from 1. For example, a parameter list with two elements looks like this:

&AttributeName.1=first

&AttributeName.2=second

", - "DeleteQueue": "

Deletes the queue specified by the QueueUrl, regardless of the queue's contents.

Be careful with the DeleteQueue action: When you delete a queue, any messages in the queue are no longer available.

When you delete a queue, the deletion process takes up to 60 seconds. Requests you send involving that queue during the 60 seconds might succeed. For example, a SendMessage request might succeed, but after 60 seconds the queue and the message you sent no longer exist.

When you delete a queue, you must wait at least 60 seconds before creating a queue with the same name.

Cross-account permissions don't apply to this action. For more information, see Grant cross-account permissions to a role and a user name in the Amazon Simple Queue Service Developer Guide.

", + "DeleteQueue": "

Deletes the queue specified by the QueueUrl, regardless of the queue's contents.

Be careful with the DeleteQueue action: When you delete a queue, any messages in the queue are no longer available.

When you delete a queue, the deletion process takes up to 60 seconds. Requests you send involving that queue during the 60 seconds might succeed. For example, a SendMessage request might succeed, but after 60 seconds the queue and the message you sent no longer exist.

When you delete a queue, you must wait at least 60 seconds before creating a queue with the same name.

Cross-account permissions don't apply to this action. For more information, see Grant cross-account permissions to a role and a user name in the Amazon SQS Developer Guide.

", "GetQueueAttributes": "

Gets attributes for the specified queue.

To determine whether a queue is FIFO, you can check whether QueueName ends with the .fifo suffix.

", - "GetQueueUrl": "

Returns the URL of an existing Amazon SQS queue.

To access a queue that belongs to another AWS account, use the QueueOwnerAWSAccountId parameter to specify the account ID of the queue's owner. The queue's owner must grant you permission to access the queue. For more information about shared queue access, see AddPermission or see Allow Developers to Write Messages to a Shared Queue in the Amazon Simple Queue Service Developer Guide.

", - "ListDeadLetterSourceQueues": "

Returns a list of your queues that have the RedrivePolicy queue attribute configured with a dead-letter queue.

The ListDeadLetterSourceQueues methods supports pagination. Set parameter MaxResults in the request to specify the maximum number of results to be returned in the response. If you do not set MaxResults, the response includes a maximum of 1,000 results. If you set MaxResults and there are additional results to display, the response includes a value for NextToken. Use NextToken as a parameter in your next request to ListDeadLetterSourceQueues to receive the next page of results.

For more information about using dead-letter queues, see Using Amazon SQS Dead-Letter Queues in the Amazon Simple Queue Service Developer Guide.

", - "ListQueueTags": "

List all cost allocation tags added to the specified Amazon SQS queue. For an overview, see Tagging Your Amazon SQS Queues in the Amazon Simple Queue Service Developer Guide.

Cross-account permissions don't apply to this action. For more information, see Grant cross-account permissions to a role and a user name in the Amazon Simple Queue Service Developer Guide.

", - "ListQueues": "

Returns a list of your queues in the current region. The response includes a maximum of 1,000 results. If you specify a value for the optional QueueNamePrefix parameter, only queues with a name that begins with the specified value are returned.

The listQueues methods supports pagination. Set parameter MaxResults in the request to specify the maximum number of results to be returned in the response. If you do not set MaxResults, the response includes a maximum of 1,000 results. If you set MaxResults and there are additional results to display, the response includes a value for NextToken. Use NextToken as a parameter in your next request to listQueues to receive the next page of results.

Cross-account permissions don't apply to this action. For more information, see Grant cross-account permissions to a role and a user name in the Amazon Simple Queue Service Developer Guide.

", + "GetQueueUrl": "

Returns the URL of an existing Amazon SQS queue.

To access a queue that belongs to another Amazon Web Services account, use the QueueOwnerAWSAccountId parameter to specify the account ID of the queue's owner. The queue's owner must grant you permission to access the queue. For more information about shared queue access, see AddPermission or see Allow Developers to Write Messages to a Shared Queue in the Amazon SQS Developer Guide.

", + "ListDeadLetterSourceQueues": "

Returns a list of your queues that have the RedrivePolicy queue attribute configured with a dead-letter queue.

The ListDeadLetterSourceQueues methods supports pagination. Set parameter MaxResults in the request to specify the maximum number of results to be returned in the response. If you do not set MaxResults, the response includes a maximum of 1,000 results. If you set MaxResults and there are additional results to display, the response includes a value for NextToken. Use NextToken as a parameter in your next request to ListDeadLetterSourceQueues to receive the next page of results.

For more information about using dead-letter queues, see Using Amazon SQS Dead-Letter Queues in the Amazon SQS Developer Guide.

", + "ListQueueTags": "

List all cost allocation tags added to the specified Amazon SQS queue. For an overview, see Tagging Your Amazon SQS Queues in the Amazon SQS Developer Guide.

Cross-account permissions don't apply to this action. For more information, see Grant cross-account permissions to a role and a user name in the Amazon SQS Developer Guide.

", + "ListQueues": "

Returns a list of your queues in the current region. The response includes a maximum of 1,000 results. If you specify a value for the optional QueueNamePrefix parameter, only queues with a name that begins with the specified value are returned.

The listQueues methods supports pagination. Set parameter MaxResults in the request to specify the maximum number of results to be returned in the response. If you do not set MaxResults, the response includes a maximum of 1,000 results. If you set MaxResults and there are additional results to display, the response includes a value for NextToken. Use NextToken as a parameter in your next request to listQueues to receive the next page of results.

Cross-account permissions don't apply to this action. For more information, see Grant cross-account permissions to a role and a user name in the Amazon SQS Developer Guide.

", "PurgeQueue": "

Deletes the messages in a queue specified by the QueueURL parameter.

When you use the PurgeQueue action, you can't retrieve any messages deleted from a queue.

The message deletion process takes up to 60 seconds. We recommend waiting for 60 seconds regardless of your queue's size.

Messages sent to the queue before you call PurgeQueue might be received but are deleted within the next minute.

Messages sent to the queue after you call PurgeQueue might be deleted while the queue is being purged.

", - "ReceiveMessage": "

Retrieves one or more messages (up to 10), from the specified queue. Using the WaitTimeSeconds parameter enables long-poll support. For more information, see Amazon SQS Long Polling in the Amazon Simple Queue Service Developer Guide.

Short poll is the default behavior where a weighted random set of machines is sampled on a ReceiveMessage call. Thus, only the messages on the sampled machines are returned. If the number of messages in the queue is small (fewer than 1,000), you most likely get fewer messages than you requested per ReceiveMessage call. If the number of messages in the queue is extremely small, you might not receive any messages in a particular ReceiveMessage response. If this happens, repeat the request.

For each message returned, the response includes the following:

The receipt handle is the identifier you must provide when deleting the message. For more information, see Queue and Message Identifiers in the Amazon Simple Queue Service Developer Guide.

You can provide the VisibilityTimeout parameter in your request. The parameter is applied to the messages that Amazon SQS returns in the response. If you don't include the parameter, the overall visibility timeout for the queue is used for the returned messages. For more information, see Visibility Timeout in the Amazon Simple Queue Service Developer Guide.

A message that isn't deleted or a message whose visibility isn't extended before the visibility timeout expires counts as a failed receive. Depending on the configuration of the queue, the message might be sent to the dead-letter queue.

In the future, new attributes might be added. If you write code that calls this action, we recommend that you structure your code so that it can handle new attributes gracefully.

", - "RemovePermission": "

Revokes any permissions in the queue policy that matches the specified Label parameter.

", + "ReceiveMessage": "

Retrieves one or more messages (up to 10), from the specified queue. Using the WaitTimeSeconds parameter enables long-poll support. For more information, see Amazon SQS Long Polling in the Amazon SQS Developer Guide.

Short poll is the default behavior where a weighted random set of machines is sampled on a ReceiveMessage call. Thus, only the messages on the sampled machines are returned. If the number of messages in the queue is small (fewer than 1,000), you most likely get fewer messages than you requested per ReceiveMessage call. If the number of messages in the queue is extremely small, you might not receive any messages in a particular ReceiveMessage response. If this happens, repeat the request.

For each message returned, the response includes the following:

The receipt handle is the identifier you must provide when deleting the message. For more information, see Queue and Message Identifiers in the Amazon SQS Developer Guide.

You can provide the VisibilityTimeout parameter in your request. The parameter is applied to the messages that Amazon SQS returns in the response. If you don't include the parameter, the overall visibility timeout for the queue is used for the returned messages. For more information, see Visibility Timeout in the Amazon SQS Developer Guide.

A message that isn't deleted or a message whose visibility isn't extended before the visibility timeout expires counts as a failed receive. Depending on the configuration of the queue, the message might be sent to the dead-letter queue.

In the future, new attributes might be added. If you write code that calls this action, we recommend that you structure your code so that it can handle new attributes gracefully.

", + "RemovePermission": "

Revokes any permissions in the queue policy that match the specified Label parameter.

", "SendMessage": "

Delivers a message to the specified queue.

A message can include only XML, JSON, and unformatted text. The following Unicode characters are allowed:

#x9 | #xA | #xD | #x20 to #xD7FF | #xE000 to #xFFFD | #x10000 to #x10FFFF

Any characters not included in this list will be rejected. For more information, see the W3C specification for characters.

", "SendMessageBatch": "

Delivers up to ten messages to the specified queue. This is a batch version of SendMessage. For a FIFO queue, multiple messages within a single batch are enqueued in the order they are sent.

The result of sending each message is reported individually in the response. Because the batch request can result in a combination of successful and unsuccessful actions, you should check for batch errors even when the call returns an HTTP status code of 200.

The maximum allowed individual message size and the maximum total payload size (the sum of the individual lengths of all of the batched messages) are both 256 KB (262,144 bytes).

A message can include only XML, JSON, and unformatted text. The following Unicode characters are allowed:

#x9 | #xA | #xD | #x20 to #xD7FF | #xE000 to #xFFFD | #x10000 to #x10FFFF

Any characters not included in this list will be rejected. For more information, see the W3C specification for characters.

If you don't specify the DelaySeconds parameter for an entry, Amazon SQS uses the default value for the queue.

Some actions take lists of parameters. These lists are specified using the param.n notation. Values of n are integers starting from 1. For example, a parameter list with two elements looks like this:

&AttributeName.1=first

&AttributeName.2=second

", - "SetQueueAttributes": "

Sets the value of one or more queue attributes. When you change a queue's attributes, the change can take up to 60 seconds for most of the attributes to propagate throughout the Amazon SQS system. Changes made to the MessageRetentionPeriod attribute can take up to 15 minutes.

", - "TagQueue": "

Add cost allocation tags to the specified Amazon SQS queue. For an overview, see Tagging Your Amazon SQS Queues in the Amazon Simple Queue Service Developer Guide.

When you use queue tags, keep the following guidelines in mind:

For a full list of tag restrictions, see Limits Related to Queues in the Amazon Simple Queue Service Developer Guide.

Cross-account permissions don't apply to this action. For more information, see Grant cross-account permissions to a role and a user name in the Amazon Simple Queue Service Developer Guide.

", - "UntagQueue": "

Remove cost allocation tags from the specified Amazon SQS queue. For an overview, see Tagging Your Amazon SQS Queues in the Amazon Simple Queue Service Developer Guide.

Cross-account permissions don't apply to this action. For more information, see Grant cross-account permissions to a role and a user name in the Amazon Simple Queue Service Developer Guide.

" + "SetQueueAttributes": "

Sets the value of one or more queue attributes. When you change a queue's attributes, the change can take up to 60 seconds for most of the attributes to propagate throughout the Amazon SQS system. Changes made to the MessageRetentionPeriod attribute can take up to 15 minutes.

", + "TagQueue": "

Add cost allocation tags to the specified Amazon SQS queue. For an overview, see Tagging Your Amazon SQS Queues in the Amazon SQS Developer Guide.

When you use queue tags, keep the following guidelines in mind:

For a full list of tag restrictions, see Quotas related to queues in the Amazon SQS Developer Guide.

Cross-account permissions don't apply to this action. For more information, see Grant cross-account permissions to a role and a user name in the Amazon SQS Developer Guide.

", + "UntagQueue": "

Remove cost allocation tags from the specified Amazon SQS queue. For an overview, see Tagging Your Amazon SQS Queues in the Amazon SQS Developer Guide.

Cross-account permissions don't apply to this action. For more information, see Grant cross-account permissions to a role and a user name in the Amazon SQS Developer Guide.

" }, "shapes": { "AWSAccountIdList": { "base": null, "refs": { - "AddPermissionRequest$AWSAccountIds": "

The AWS account number of the principal who is given permission. The principal must have an AWS account, but does not need to be signed up for Amazon SQS. For information about locating the AWS account identification, see Your AWS Identifiers in the Amazon Simple Queue Service Developer Guide.

" + "AddPermissionRequest$AWSAccountIds": "

The account numbers of the principals who are to receive permission. For information about locating the account identification, see Your Amazon Web Services Identifiers in the Amazon SQS Developer Guide.

" } }, "ActionNameList": { "base": null, "refs": { - "AddPermissionRequest$Actions": "

The action the client wants to allow for the specified principal. Valid values: the name of any action or *.

For more information about these actions, see Overview of Managing Access Permissions to Your Amazon Simple Queue Service Resource in the Amazon Simple Queue Service Developer Guide.

Specifying SendMessage, DeleteMessage, or ChangeMessageVisibility for ActionName.n also grants permissions for the corresponding batch versions of those actions: SendMessageBatch, DeleteMessageBatch, and ChangeMessageVisibilityBatch.

" + "AddPermissionRequest$Actions": "

The action the client wants to allow for the specified principal. Valid values: the name of any action or *.

For more information about these actions, see Overview of Managing Access Permissions to Your Amazon Simple Queue Service Resource in the Amazon SQS Developer Guide.

Specifying SendMessage, DeleteMessage, or ChangeMessageVisibility for ActionName.n also grants permissions for the corresponding batch versions of those actions: SendMessageBatch, DeleteMessageBatch, and ChangeMessageVisibilityBatch.

" } }, "AddPermissionRequest": { @@ -44,8 +44,8 @@ "AttributeNameList": { "base": null, "refs": { - "GetQueueAttributesRequest$AttributeNames": "

A list of attributes for which to retrieve information.

In the future, new attributes might be added. If you write code that calls this action, we recommend that you structure your code so that it can handle new attributes gracefully.

The following attributes are supported:

The ApproximateNumberOfMessagesDelayed, ApproximateNumberOfMessagesNotVisible, and ApproximateNumberOfMessagesVisible metrics may not achieve consistency until at least 1 minute after the producers stop sending messages. This period is required for the queue metadata to reach eventual consistency.

The following attributes apply only to server-side-encryption:

The following attributes apply only to FIFO (first-in-first-out) queues:

The following attributes apply only to high throughput for FIFO queues:

To enable high throughput for FIFO queues, do the following:

If you set these attributes to anything other than the values shown for enabling high throughput, normal throughput is in effect and deduplication occurs as specified.

For information on throughput quotas, see Quotas related to messages in the Amazon Simple Queue Service Developer Guide.

", - "ReceiveMessageRequest$AttributeNames": "

A list of attributes that need to be returned along with each message. These attributes include:

" + "GetQueueAttributesRequest$AttributeNames": "

A list of attributes for which to retrieve information.

The AttributeName.N parameter is optional, but if you don't specify values for this parameter, the request returns empty results.

In the future, new attributes might be added. If you write code that calls this action, we recommend that you structure your code so that it can handle new attributes gracefully.

The following attributes are supported:

The ApproximateNumberOfMessagesDelayed, ApproximateNumberOfMessagesNotVisible, and ApproximateNumberOfMessagesVisible metrics may not achieve consistency until at least 1 minute after the producers stop sending messages. This period is required for the queue metadata to reach eventual consistency.

The following attributes apply only to server-side-encryption:

The following attributes apply only to FIFO (first-in-first-out) queues:

The following attributes apply only to high throughput for FIFO queues:

To enable high throughput for FIFO queues, do the following:

If you set these attributes to anything other than the values shown for enabling high throughput, normal throughput is in effect and deduplication occurs as specified.

For information on throughput quotas, see Quotas related to messages in the Amazon SQS Developer Guide.

", + "ReceiveMessageRequest$AttributeNames": "

A list of attributes that need to be returned along with each message. These attributes include:

" } }, "BatchEntryIdsNotDistinct": { @@ -214,7 +214,7 @@ } }, "GetQueueUrlResult": { - "base": "

For more information, see Interpreting Responses in the Amazon Simple Queue Service Developer Guide.

", + "base": "

For more information, see Interpreting Responses in the Amazon SQS Developer Guide.

", "refs": { } }, @@ -307,16 +307,16 @@ "MessageBodyAttributeMap": { "base": null, "refs": { - "Message$MessageAttributes": "

Each message attribute consists of a Name, Type, and Value. For more information, see Amazon SQS Message Attributes in the Amazon Simple Queue Service Developer Guide.

", - "SendMessageBatchRequestEntry$MessageAttributes": "

Each message attribute consists of a Name, Type, and Value. For more information, see Amazon SQS Message Attributes in the Amazon Simple Queue Service Developer Guide.

", - "SendMessageRequest$MessageAttributes": "

Each message attribute consists of a Name, Type, and Value. For more information, see Amazon SQS Message Attributes in the Amazon Simple Queue Service Developer Guide.

" + "Message$MessageAttributes": "

Each message attribute consists of a Name, Type, and Value. For more information, see Amazon SQS message attributes in the Amazon SQS Developer Guide.

", + "SendMessageBatchRequestEntry$MessageAttributes": "

Each message attribute consists of a Name, Type, and Value. For more information, see Amazon SQS message attributes in the Amazon SQS Developer Guide.

", + "SendMessageRequest$MessageAttributes": "

Each message attribute consists of a Name, Type, and Value. For more information, see Amazon SQS message attributes in the Amazon SQS Developer Guide.

" } }, "MessageBodySystemAttributeMap": { "base": null, "refs": { - "SendMessageBatchRequestEntry$MessageSystemAttributes": "

The message system attribute to send Each message system attribute consists of a Name, Type, and Value.

", - "SendMessageRequest$MessageSystemAttributes": "

The message system attribute to send. Each message system attribute consists of a Name, Type, and Value.

" + "SendMessageBatchRequestEntry$MessageSystemAttributes": "

The message system attribute to send. Each message system attribute consists of a Name, Type, and Value.

", + "SendMessageRequest$MessageSystemAttributes": "

The message system attribute to send. Each message system attribute consists of a Name, Type, and Value.

" } }, "MessageList": { @@ -372,9 +372,9 @@ "QueueAttributeMap": { "base": null, "refs": { - "CreateQueueRequest$Attributes": "

A map of attributes with their corresponding values.

The following lists the names, descriptions, and values of the special request parameters that the CreateQueue action uses:

The following attributes apply only to server-side-encryption:

The following attributes apply only to FIFO (first-in-first-out) queues:

The following attributes apply only to high throughput for FIFO queues:

To enable high throughput for FIFO queues, do the following:

If you set these attributes to anything other than the values shown for enabling high throughput, normal throughput is in effect and deduplication occurs as specified.

For information on throughput quotas, see Quotas related to messages in the Amazon Simple Queue Service Developer Guide.

", + "CreateQueueRequest$Attributes": "

A map of attributes with their corresponding values.

The following lists the names, descriptions, and values of the special request parameters that the CreateQueue action uses:

The following attributes apply only to server-side-encryption:

The following attributes apply only to FIFO (first-in-first-out) queues:

The following attributes apply only to high throughput for FIFO queues:

To enable high throughput for FIFO queues, do the following:

If you set these attributes to anything other than the values shown for enabling high throughput, normal throughput is in effect and deduplication occurs as specified.

For information on throughput quotas, see Quotas related to messages in the Amazon SQS Developer Guide.

", "GetQueueAttributesResult$Attributes": "

A map of attributes to their respective values.

", - "SetQueueAttributesRequest$Attributes": "

A map of attributes to set.

The following lists the names, descriptions, and values of the special request parameters that the SetQueueAttributes action uses:

The following attributes apply only to server-side-encryption:

The following attribute applies only to FIFO (first-in-first-out) queues:

The following attributes apply only to high throughput for FIFO queues:

To enable high throughput for FIFO queues, do the following:

If you set these attributes to anything other than the values shown for enabling high throughput, normal throughput is in effect and deduplication occurs as specified.

For information on throughput quotas, see Quotas related to messages in the Amazon Simple Queue Service Developer Guide.

" + "SetQueueAttributesRequest$Attributes": "

A map of attributes to set.

The following lists the names, descriptions, and values of the special request parameters that the SetQueueAttributes action uses:

The following attributes apply only to server-side-encryption:

The following attribute applies only to FIFO (first-in-first-out) queues:

The following attributes apply only to high throughput for FIFO queues:

To enable high throughput for FIFO queues, do the following:

If you set these attributes to anything other than the values shown for enabling high throughput, normal throughput is in effect and deduplication occurs as specified.

For information on throughput quotas, see Quotas related to messages in the Amazon SQS Developer Guide.

" } }, "QueueAttributeName": { @@ -502,34 +502,34 @@ "DeleteQueueRequest$QueueUrl": "

The URL of the Amazon SQS queue to delete.

Queue URLs and names are case-sensitive.

", "GetQueueAttributesRequest$QueueUrl": "

The URL of the Amazon SQS queue whose attribute information is retrieved.

Queue URLs and names are case-sensitive.

", "GetQueueUrlRequest$QueueName": "

The name of the queue whose URL must be fetched. Maximum 80 characters. Valid values: alphanumeric characters, hyphens (-), and underscores (_).

Queue URLs and names are case-sensitive.

", - "GetQueueUrlRequest$QueueOwnerAWSAccountId": "

The AWS account ID of the account that created the queue.

", + "GetQueueUrlRequest$QueueOwnerAWSAccountId": "

The account ID of the account that created the queue.

", "GetQueueUrlResult$QueueUrl": "

The URL of the queue.

", "ListDeadLetterSourceQueuesRequest$QueueUrl": "

The URL of a dead-letter queue.

Queue URLs and names are case-sensitive.

", "ListQueueTagsRequest$QueueUrl": "

The URL of the queue.

", "ListQueuesRequest$QueueNamePrefix": "

A string to use for filtering the list results. Only those queues whose name begins with the specified string are returned.

Queue URLs and names are case-sensitive.

", - "Message$MessageId": "

A unique identifier for the message. A MessageIdis considered unique across all AWS accounts for an extended period of time.

", + "Message$MessageId": "

A unique identifier for the message. A MessageId is considered unique across all accounts for an extended period of time.

", "Message$ReceiptHandle": "

An identifier associated with the act of receiving the message. A new receipt handle is returned every time you receive a message. When deleting a message, you provide the last received receipt handle to delete the message.

", "Message$MD5OfBody": "

An MD5 digest of the non-URL-encoded message body string.

", "Message$Body": "

The message's contents (not URL-encoded).

", "Message$MD5OfMessageAttributes": "

An MD5 digest of the non-URL-encoded message attribute string. You can use this attribute to verify that Amazon SQS received the message correctly. Amazon SQS URL-decodes the message before creating the MD5 digest. For information about MD5, see RFC1321.

", "MessageAttributeValue$StringValue": "

Strings are Unicode with UTF-8 binary encoding. For a list of code values, see ASCII Printable Characters.

", - "MessageAttributeValue$DataType": "

Amazon SQS supports the following logical data types: String, Number, and Binary. For the Number data type, you must use StringValue.

You can also append custom labels. For more information, see Amazon SQS Message Attributes in the Amazon Simple Queue Service Developer Guide.

", + "MessageAttributeValue$DataType": "

Amazon SQS supports the following logical data types: String, Number, and Binary. For the Number data type, you must use StringValue.

You can also append custom labels. For more information, see Amazon SQS Message Attributes in the Amazon SQS Developer Guide.

", "MessageBodyAttributeMap$key": null, "MessageSystemAttributeMap$value": null, "MessageSystemAttributeValue$StringValue": "

Strings are Unicode with UTF-8 binary encoding. For a list of code values, see ASCII Printable Characters.

", - "MessageSystemAttributeValue$DataType": "

Amazon SQS supports the following logical data types: String, Number, and Binary. For the Number data type, you must use StringValue.

You can also append custom labels. For more information, see Amazon SQS Message Attributes in the Amazon Simple Queue Service Developer Guide.

", + "MessageSystemAttributeValue$DataType": "

Amazon SQS supports the following logical data types: String, Number, and Binary. For the Number data type, you must use StringValue.

You can also append custom labels. For more information, see Amazon SQS Message Attributes in the Amazon SQS Developer Guide.

", "PurgeQueueRequest$QueueUrl": "

The URL of the queue from which the PurgeQueue action deletes messages.

Queue URLs and names are case-sensitive.

", "QueueAttributeMap$value": null, "QueueUrlList$member": null, "ReceiveMessageRequest$QueueUrl": "

The URL of the Amazon SQS queue from which messages are received.

Queue URLs and names are case-sensitive.

", - "ReceiveMessageRequest$ReceiveRequestAttemptId": "

This parameter applies only to FIFO (first-in-first-out) queues.

The token used for deduplication of ReceiveMessage calls. If a networking issue occurs after a ReceiveMessage action, and instead of a response you receive a generic error, it is possible to retry the same action with an identical ReceiveRequestAttemptId to retrieve the same set of messages, even if their visibility timeout has not yet expired.

The maximum length of ReceiveRequestAttemptId is 128 characters. ReceiveRequestAttemptId can contain alphanumeric characters (a-z, A-Z, 0-9) and punctuation (!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~).

For best practices of using ReceiveRequestAttemptId, see Using the ReceiveRequestAttemptId Request Parameter in the Amazon Simple Queue Service Developer Guide.

", + "ReceiveMessageRequest$ReceiveRequestAttemptId": "

This parameter applies only to FIFO (first-in-first-out) queues.

The token used for deduplication of ReceiveMessage calls. If a networking issue occurs after a ReceiveMessage action, and instead of a response you receive a generic error, it is possible to retry the same action with an identical ReceiveRequestAttemptId to retrieve the same set of messages, even if their visibility timeout has not yet expired.

The maximum length of ReceiveRequestAttemptId is 128 characters. ReceiveRequestAttemptId can contain alphanumeric characters (a-z, A-Z, 0-9) and punctuation (!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~).

For best practices of using ReceiveRequestAttemptId, see Using the ReceiveRequestAttemptId Request Parameter in the Amazon SQS Developer Guide.

", "RemovePermissionRequest$QueueUrl": "

The URL of the Amazon SQS queue from which permissions are removed.

Queue URLs and names are case-sensitive.

", "RemovePermissionRequest$Label": "

The identification of the permission to remove. This is the label added using the AddPermission action.

", "SendMessageBatchRequest$QueueUrl": "

The URL of the Amazon SQS queue to which batched messages are sent.

Queue URLs and names are case-sensitive.

", "SendMessageBatchRequestEntry$Id": "

An identifier for a message in this batch used to communicate the result.

The Ids of a batch request need to be unique within a request.

This identifier can have up to 80 characters. The following characters are accepted: alphanumeric characters, hyphens(-), and underscores (_).

", "SendMessageBatchRequestEntry$MessageBody": "

The body of the message.

", - "SendMessageBatchRequestEntry$MessageDeduplicationId": "

This parameter applies only to FIFO (first-in-first-out) queues.

The token used for deduplication of messages within a 5-minute minimum deduplication interval. If a message with a particular MessageDeduplicationId is sent successfully, subsequent messages with the same MessageDeduplicationId are accepted successfully but aren't delivered. For more information, see Exactly-once processing in the Amazon Simple Queue Service Developer Guide.

The MessageDeduplicationId is available to the consumer of the message (this can be useful for troubleshooting delivery issues).

If a message is sent successfully but the acknowledgement is lost and the message is resent with the same MessageDeduplicationId after the deduplication interval, Amazon SQS can't detect duplicate messages.

Amazon SQS continues to keep track of the message deduplication ID even after the message is received and deleted.

The length of MessageDeduplicationId is 128 characters. MessageDeduplicationId can contain alphanumeric characters (a-z, A-Z, 0-9) and punctuation (!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~).

For best practices of using MessageDeduplicationId, see Using the MessageDeduplicationId Property in the Amazon Simple Queue Service Developer Guide.

", - "SendMessageBatchRequestEntry$MessageGroupId": "

This parameter applies only to FIFO (first-in-first-out) queues.

The tag that specifies that a message belongs to a specific message group. Messages that belong to the same message group are processed in a FIFO manner (however, messages in different message groups might be processed out of order). To interleave multiple ordered streams within a single queue, use MessageGroupId values (for example, session data for multiple users). In this scenario, multiple consumers can process the queue, but the session data of each user is processed in a FIFO fashion.

The length of MessageGroupId is 128 characters. Valid values: alphanumeric characters and punctuation (!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~).

For best practices of using MessageGroupId, see Using the MessageGroupId Property in the Amazon Simple Queue Service Developer Guide.

MessageGroupId is required for FIFO queues. You can't use it for Standard queues.

", + "SendMessageBatchRequestEntry$MessageDeduplicationId": "

This parameter applies only to FIFO (first-in-first-out) queues.

The token used for deduplication of messages within a 5-minute minimum deduplication interval. If a message with a particular MessageDeduplicationId is sent successfully, subsequent messages with the same MessageDeduplicationId are accepted successfully but aren't delivered. For more information, see Exactly-once processing in the Amazon SQS Developer Guide.

The MessageDeduplicationId is available to the consumer of the message (this can be useful for troubleshooting delivery issues).

If a message is sent successfully but the acknowledgement is lost and the message is resent with the same MessageDeduplicationId after the deduplication interval, Amazon SQS can't detect duplicate messages.

Amazon SQS continues to keep track of the message deduplication ID even after the message is received and deleted.

The length of MessageDeduplicationId is 128 characters. MessageDeduplicationId can contain alphanumeric characters (a-z, A-Z, 0-9) and punctuation (!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~).

For best practices of using MessageDeduplicationId, see Using the MessageDeduplicationId Property in the Amazon SQS Developer Guide.

", + "SendMessageBatchRequestEntry$MessageGroupId": "

This parameter applies only to FIFO (first-in-first-out) queues.

The tag that specifies that a message belongs to a specific message group. Messages that belong to the same message group are processed in a FIFO manner (however, messages in different message groups might be processed out of order). To interleave multiple ordered streams within a single queue, use MessageGroupId values (for example, session data for multiple users). In this scenario, multiple consumers can process the queue, but the session data of each user is processed in a FIFO fashion.

The length of MessageGroupId is 128 characters. Valid values: alphanumeric characters and punctuation (!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~).

For best practices of using MessageGroupId, see Using the MessageGroupId Property in the Amazon SQS Developer Guide.

MessageGroupId is required for FIFO queues. You can't use it for Standard queues.

", "SendMessageBatchResultEntry$Id": "

An identifier for the message in this batch.

", "SendMessageBatchResultEntry$MessageId": "

An identifier for the message.

", "SendMessageBatchResultEntry$MD5OfMessageBody": "

An MD5 digest of the non-URL-encoded message body string. You can use this attribute to verify that Amazon SQS received the message correctly. Amazon SQS URL-decodes the message before creating the MD5 digest. For information about MD5, see RFC1321.

", @@ -538,12 +538,12 @@ "SendMessageBatchResultEntry$SequenceNumber": "

This parameter applies only to FIFO (first-in-first-out) queues.

The large, non-consecutive number that Amazon SQS assigns to each message.

The length of SequenceNumber is 128 bits. SequenceNumber continues to increase for a particular MessageGroupId.

", "SendMessageRequest$QueueUrl": "

The URL of the Amazon SQS queue to which a message is sent.

Queue URLs and names are case-sensitive.

", "SendMessageRequest$MessageBody": "

The message to send. The minimum size is one character. The maximum size is 256 KB.

A message can include only XML, JSON, and unformatted text. The following Unicode characters are allowed:

#x9 | #xA | #xD | #x20 to #xD7FF | #xE000 to #xFFFD | #x10000 to #x10FFFF

Any characters not included in this list will be rejected. For more information, see the W3C specification for characters.

", - "SendMessageRequest$MessageDeduplicationId": "

This parameter applies only to FIFO (first-in-first-out) queues.

The token used for deduplication of sent messages. If a message with a particular MessageDeduplicationId is sent successfully, any messages sent with the same MessageDeduplicationId are accepted successfully but aren't delivered during the 5-minute deduplication interval. For more information, see Exactly-once processing in the Amazon Simple Queue Service Developer Guide.

The MessageDeduplicationId is available to the consumer of the message (this can be useful for troubleshooting delivery issues).

If a message is sent successfully but the acknowledgement is lost and the message is resent with the same MessageDeduplicationId after the deduplication interval, Amazon SQS can't detect duplicate messages.

Amazon SQS continues to keep track of the message deduplication ID even after the message is received and deleted.

The maximum length of MessageDeduplicationId is 128 characters. MessageDeduplicationId can contain alphanumeric characters (a-z, A-Z, 0-9) and punctuation (!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~).

For best practices of using MessageDeduplicationId, see Using the MessageDeduplicationId Property in the Amazon Simple Queue Service Developer Guide.

", - "SendMessageRequest$MessageGroupId": "

This parameter applies only to FIFO (first-in-first-out) queues.

The tag that specifies that a message belongs to a specific message group. Messages that belong to the same message group are processed in a FIFO manner (however, messages in different message groups might be processed out of order). To interleave multiple ordered streams within a single queue, use MessageGroupId values (for example, session data for multiple users). In this scenario, multiple consumers can process the queue, but the session data of each user is processed in a FIFO fashion.

The length of MessageGroupId is 128 characters. Valid values: alphanumeric characters and punctuation (!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~).

For best practices of using MessageGroupId, see Using the MessageGroupId Property in the Amazon Simple Queue Service Developer Guide.

MessageGroupId is required for FIFO queues. You can't use it for Standard queues.

", + "SendMessageRequest$MessageDeduplicationId": "

This parameter applies only to FIFO (first-in-first-out) queues.

The token used for deduplication of sent messages. If a message with a particular MessageDeduplicationId is sent successfully, any messages sent with the same MessageDeduplicationId are accepted successfully but aren't delivered during the 5-minute deduplication interval. For more information, see Exactly-once processing in the Amazon SQS Developer Guide.

The MessageDeduplicationId is available to the consumer of the message (this can be useful for troubleshooting delivery issues).

If a message is sent successfully but the acknowledgement is lost and the message is resent with the same MessageDeduplicationId after the deduplication interval, Amazon SQS can't detect duplicate messages.

Amazon SQS continues to keep track of the message deduplication ID even after the message is received and deleted.

The maximum length of MessageDeduplicationId is 128 characters. MessageDeduplicationId can contain alphanumeric characters (a-z, A-Z, 0-9) and punctuation (!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~).

For best practices of using MessageDeduplicationId, see Using the MessageDeduplicationId Property in the Amazon SQS Developer Guide.

", + "SendMessageRequest$MessageGroupId": "

This parameter applies only to FIFO (first-in-first-out) queues.

The tag that specifies that a message belongs to a specific message group. Messages that belong to the same message group are processed in a FIFO manner (however, messages in different message groups might be processed out of order). To interleave multiple ordered streams within a single queue, use MessageGroupId values (for example, session data for multiple users). In this scenario, multiple consumers can process the queue, but the session data of each user is processed in a FIFO fashion.

The length of MessageGroupId is 128 characters. Valid values: alphanumeric characters and punctuation (!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~).

For best practices of using MessageGroupId, see Using the MessageGroupId Property in the Amazon SQS Developer Guide.

MessageGroupId is required for FIFO queues. You can't use it for Standard queues.

", "SendMessageResult$MD5OfMessageBody": "

An MD5 digest of the non-URL-encoded message body string. You can use this attribute to verify that Amazon SQS received the message correctly. Amazon SQS URL-decodes the message before creating the MD5 digest. For information about MD5, see RFC1321.

", "SendMessageResult$MD5OfMessageAttributes": "

An MD5 digest of the non-URL-encoded message attribute string. You can use this attribute to verify that Amazon SQS received the message correctly. Amazon SQS URL-decodes the message before creating the MD5 digest. For information about MD5, see RFC1321.

", "SendMessageResult$MD5OfMessageSystemAttributes": "

An MD5 digest of the non-URL-encoded message system attribute string. You can use this attribute to verify that Amazon SQS received the message correctly. Amazon SQS URL-decodes the message before creating the MD5 digest.

", - "SendMessageResult$MessageId": "

An attribute containing the MessageId of the message sent to the queue. For more information, see Queue and Message Identifiers in the Amazon Simple Queue Service Developer Guide.

", + "SendMessageResult$MessageId": "

An attribute containing the MessageId of the message sent to the queue. For more information, see Queue and Message Identifiers in the Amazon SQS Developer Guide.

", "SendMessageResult$SequenceNumber": "

This parameter applies only to FIFO (first-in-first-out) queues.

The large, non-consecutive number that Amazon SQS assigns to each message.

The length of SequenceNumber is 128 bits. SequenceNumber continues to increase for a particular MessageGroupId.

", "SetQueueAttributesRequest$QueueUrl": "

The URL of the Amazon SQS queue whose attributes are set.

Queue URLs and names are case-sensitive.

", "StringList$member": null, @@ -574,7 +574,7 @@ "TagMap": { "base": null, "refs": { - "CreateQueueRequest$tags": "

Add cost allocation tags to the specified Amazon SQS queue. For an overview, see Tagging Your Amazon SQS Queues in the Amazon Simple Queue Service Developer Guide.

When you use queue tags, keep the following guidelines in mind:

For a full list of tag restrictions, see Limits Related to Queues in the Amazon Simple Queue Service Developer Guide.

To be able to tag a queue on creation, you must have the sqs:CreateQueue and sqs:TagQueue permissions.

Cross-account permissions don't apply to this action. For more information, see Grant cross-account permissions to a role and a user name in the Amazon Simple Queue Service Developer Guide.

", + "CreateQueueRequest$tags": "

Add cost allocation tags to the specified Amazon SQS queue. For an overview, see Tagging Your Amazon SQS Queues in the Amazon SQS Developer Guide.

When you use queue tags, keep the following guidelines in mind:

For a full list of tag restrictions, see Quotas related to queues in the Amazon SQS Developer Guide.

To be able to tag a queue on creation, you must have the sqs:CreateQueue and sqs:TagQueue permissions.

Cross-account permissions don't apply to this action. For more information, see Grant cross-account permissions to a role and a user name in the Amazon SQS Developer Guide.

", "ListQueueTagsResult$Tags": "

The list of all tags added to the specified queue.

", "TagQueueRequest$Tags": "

The list of tags to be added to the specified queue.

" } diff --git a/models/endpoints/endpoints.json b/models/endpoints/endpoints.json index 552dca1ef04..72ae6c9cd76 100644 --- a/models/endpoints/endpoints.json +++ b/models/endpoints/endpoints.json @@ -865,6 +865,7 @@ "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-northeast-3" : { }, "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, @@ -2896,6 +2897,7 @@ "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-northeast-3" : { }, "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, @@ -6503,6 +6505,7 @@ "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-northeast-3" : { }, "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, diff --git a/service/ec2/api.go b/service/ec2/api.go index 42c06eba446..c85e7824cec 100644 --- a/service/ec2/api.go +++ b/service/ec2/api.go @@ -6719,7 +6719,7 @@ func (c *EC2) CreateSpotDatafeedSubscriptionRequest(input *CreateSpotDatafeedSub // CreateSpotDatafeedSubscription API operation for Amazon Elastic Compute Cloud. // // Creates a data feed for Spot Instances, enabling you to view Spot Instance -// usage logs. You can create one data feed per AWS account. For more information, +// usage logs. You can create one data feed per account. For more information, // see Spot Instance data feed (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-data-feeds.html) // in the Amazon EC2 User Guide for Linux Instances. // @@ -14136,7 +14136,7 @@ func (c *EC2) DescribeCapacityReservationsRequest(input *DescribeCapacityReserva // DescribeCapacityReservations API operation for Amazon Elastic Compute Cloud. // // Describes one or more of your Capacity Reservations. The results describe -// only the Capacity Reservations in the AWS Region that you're currently using. +// only the Capacity Reservations in the Region that you're currently using. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -31158,9 +31158,9 @@ func (c *EC2) GetCapacityReservationUsageRequest(input *GetCapacityReservationUs // // Gets usage information about a Capacity Reservation. If the Capacity Reservation // is shared, it shows usage information for the Capacity Reservation owner -// and each AWS account that is currently using the shared capacity. If the -// Capacity Reservation is not shared, it shows only the Capacity Reservation -// owner's usage. +// and each account that is currently using the shared capacity. If the Capacity +// Reservation is not shared, it shows only the Capacity Reservation owner's +// usage. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -34059,14 +34059,14 @@ func (c *EC2) ModifyDefaultCreditSpecificationRequest(input *ModifyDefaultCredit // ModifyDefaultCreditSpecification API operation for Amazon Elastic Compute Cloud. // // Modifies the default credit option for CPU usage of burstable performance -// instances. The default credit option is set at the account level per AWS -// Region, and is specified per instance family. All new burstable performance -// instances in the account launch using the default credit option. +// instances. The default credit option is set at the account level per Region, +// and is specified per instance family. All new burstable performance instances +// in the account launch using the default credit option. // // ModifyDefaultCreditSpecification is an asynchronous operation, which works -// at an AWS Region level and modifies the credit option for each Availability -// Zone. All zones in a Region are updated within five minutes. 
But if instances -// are launched during this operation, they might not get the new credit option +// at an Region level and modifies the credit option for each Availability Zone. +// All zones in a Region are updated within five minutes. But if instances are +// launched during this operation, they might not get the new credit option // until the zone is updated. To verify whether the update has occurred, you // can call GetDefaultCreditSpecification and check DefaultCreditSpecification // for updates. @@ -41239,11 +41239,7 @@ func (c *EC2) StartInstancesRequest(input *StartInstancesInput) (req *request.Re // released and you are not billed for instance usage. However, your root partition // Amazon EBS volume remains and continues to persist your data, and you are // charged for Amazon EBS volume usage. You can restart your instance at any -// time. Every time you start your Windows instance, Amazon EC2 charges you -// for a full instance hour. If you stop and restart your Windows instance, -// a new instance hour begins and Amazon EC2 charges you for another full instance -// hour even if you are still within the same 60-minute period when it was stopped. -// Every time you start your Linux instance, Amazon EC2 charges a one-minute +// time. Every time you start your instance, Amazon EC2 charges a one-minute // minimum for instance usage, and thereafter charges per second for instance // usage. // @@ -41497,12 +41493,8 @@ func (c *EC2) StopInstancesRequest(input *StopInstancesInput) (req *request.Requ // We don't charge usage for a stopped instance, or data transfer fees; however, // your root partition Amazon EBS volume remains and continues to persist your // data, and you are charged for Amazon EBS volume usage. Every time you start -// your Windows instance, Amazon EC2 charges you for a full instance hour. 
If -// you stop and restart your Windows instance, a new instance hour begins and -// Amazon EC2 charges you for another full instance hour even if you are still -// within the same 60-minute period when it was stopped. Every time you start -// your Linux instance, Amazon EC2 charges a one-minute minimum for instance -// usage, and thereafter charges per second for instance usage. +// your instance, Amazon EC2 charges a one-minute minimum for instance usage, +// and thereafter charges per second for instance usage. // // You can't stop or hibernate instance store-backed instances. You can't use // the Stop action to hibernate Spot Instances, but you can specify that Amazon @@ -41686,6 +41678,36 @@ func (c *EC2) TerminateInstancesRequest(input *TerminateInstancesInput) (req *re // If you specify multiple instances and the request fails (for example, because // of a single incorrect instance ID), none of the instances are terminated. // +// If you terminate multiple instances across multiple Availability Zones, and +// one or more of the specified instances are enabled for termination protection, +// the request fails with the following results: +// +// * The specified instances that are in the same Availability Zone as the +// protected instance are not terminated. +// +// * The specified instances that are in different Availability Zones, where +// no other specified instances are protected, are successfully terminated. +// +// For example, say you have the following instances: +// +// * Instance A: us-east-1a; Not protected +// +// * Instance B: us-east-1a; Not protected +// +// * Instance C: us-east-1b; Protected +// +// * Instance D: us-east-1b; not protected +// +// If you attempt to terminate all of these instances in the same request, the +// request reports failure with the following results: +// +// * Instance A and Instance B are successfully terminated because none of +// the specified instances in us-east-1a are enabled for termination protection. 
+// +// * Instance C and Instance D fail to terminate because at least one of +// the specified instances in us-east-1b (Instance C) is enabled for termination +// protection. +// // Terminated instances remain visible after termination (for approximately // one hour). // @@ -47906,7 +47928,7 @@ type CapacityReservation struct { // was created. OutpostArn *string `locationName:"outpostArn" type:"string"` - // The ID of the AWS account that owns the Capacity Reservation. + // The ID of the account that owns the Capacity Reservation. OwnerId *string `locationName:"ownerId" type:"string"` // The date and time at which the Capacity Reservation was started. @@ -47940,10 +47962,10 @@ type CapacityReservation struct { // can have one of the following tenancy settings: // // * default - The Capacity Reservation is created on hardware that is shared - // with other AWS accounts. + // with other accounts. // // * dedicated - The Capacity Reservation is created on single-tenant hardware - // that is dedicated to a single AWS account. + // that is dedicated to a single account. Tenancy *string `locationName:"tenancy" type:"string" enum:"CapacityReservationTenancy"` // The total number of instances for which the Capacity Reservation reserves @@ -48088,7 +48110,7 @@ type CapacityReservationGroup struct { // The ARN of the resource group. GroupArn *string `locationName:"groupArn" type:"string"` - // The ID of the AWS account that owns the resource group. + // The ID of the account that owns the resource group. OwnerId *string `locationName:"ownerId" type:"string"` } @@ -49767,7 +49789,7 @@ func (s *ConfirmProductInstanceInput) SetProductCode(v string) *ConfirmProductIn type ConfirmProductInstanceOutput struct { _ struct{} `type:"structure"` - // The AWS account ID of the instance owner. This is only present if the product + // The account ID of the instance owner. This is only present if the product // code is attached to the instance. 
OwnerId *string `locationName:"ownerId" type:"string"` @@ -50721,10 +50743,10 @@ type CreateCapacityReservationInput struct { // can have one of the following tenancy settings: // // * default - The Capacity Reservation is created on hardware that is shared - // with other AWS accounts. + // with other accounts. // // * dedicated - The Capacity Reservation is created on single-tenant hardware - // that is dedicated to a single AWS account. + // that is dedicated to a single account. Tenancy *string `type:"string" enum:"CapacityReservationTenancy"` } @@ -51861,6 +51883,9 @@ type CreateFleetInput struct { // of the request. For more information, see Ensuring Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). ClientToken *string `type:"string"` + // Reserved. + Context *string `type:"string"` + // Checks whether you have the required permissions for the action, without // actually making the request, and provides an error response. If you have // the required permissions, the error response is DryRunOperation. Otherwise, @@ -51889,10 +51914,15 @@ type CreateFleetInput struct { // Describes the configuration of Spot Instances in an EC2 Fleet. SpotOptions *SpotOptionsRequest `type:"structure"` - // The key-value pair for tagging the EC2 Fleet request on creation. The value - // for ResourceType must be fleet, otherwise the fleet request fails. To tag - // instances at launch, specify the tags in the launch template (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-launch-templates.html#create-launch-template). - // For information about tagging after launch, see Tagging your resources (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#tag-resources). + // The key-value pair for tagging the EC2 Fleet request on creation. For more + // information, see Tagging your resources (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#tag-resources). 
+ // + // If the fleet type is instant, specify a resource type of fleet to tag the + // fleet or instance to tag the instances at launch. + // + // If the fleet type is maintain or request, specify a resource type of fleet + // to tag the fleet. You cannot specify a resource type of instance. To tag + // instances at launch, specify the tags in a launch template (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-launch-templates.html#create-launch-template). TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` // The number of units to request. @@ -51904,7 +51934,7 @@ type CreateFleetInput struct { // expires. TerminateInstancesWithExpiration *bool `type:"boolean"` - // The type of request. The default value is maintain. + // The fleet type. The default value is maintain. // // * maintain - The EC2 Fleet places an asynchronous request for your desired // capacity, and continues to maintain your desired Spot capacity by replenishing @@ -51980,6 +52010,12 @@ func (s *CreateFleetInput) SetClientToken(v string) *CreateFleetInput { return s } +// SetContext sets the Context field's value. +func (s *CreateFleetInput) SetContext(v string) *CreateFleetInput { + s.Context = &v + return s +} + // SetDryRun sets the DryRun field's value. func (s *CreateFleetInput) SetDryRun(v bool) *CreateFleetInput { s.DryRun = &v @@ -52119,14 +52155,14 @@ type CreateFleetOutput struct { _ struct{} `type:"structure"` // Information about the instances that could not be launched by the fleet. - // Valid only when Type is set to instant. + // Supported only for fleets of type instant. Errors []*CreateFleetError `locationName:"errorSet" locationNameList:"item" type:"list"` // The ID of the EC2 Fleet. FleetId *string `locationName:"fleetId" type:"string"` - // Information about the instances that were launched by the fleet. Valid only - // when Type is set to instant. 
+ // Information about the instances that were launched by the fleet. Supported + // only for fleets of type instant. Instances []*CreateFleetInstance `locationName:"fleetInstanceSet" locationNameList:"item" type:"list"` } @@ -64214,7 +64250,7 @@ type DescribeCapacityReservationsInput struct { // * instance-type - The type of instance for which the Capacity Reservation // reserves capacity. // - // * owner-id - The ID of the AWS account that owns the Capacity Reservation. + // * owner-id - The ID of the account that owns the Capacity Reservation. // // * availability-zone-id - The Availability Zone ID of the Capacity Reservation. // @@ -64226,8 +64262,8 @@ type DescribeCapacityReservationsInput struct { // * tenancy - Indicates the tenancy of the Capacity Reservation. A Capacity // Reservation can have one of the following tenancy settings: default - // The Capacity Reservation is created on hardware that is shared with other - // AWS accounts. dedicated - The Capacity Reservation is created on single-tenant - // hardware that is dedicated to a single AWS account. + // accounts. dedicated - The Capacity Reservation is created on single-tenant + // hardware that is dedicated to a single account. // // * outpost-arn - The Amazon Resource Name (ARN) of the Outpost on which // the Capacity Reservation was created. @@ -68327,8 +68363,8 @@ type DescribeInstanceAttributeOutput struct { // Indicates whether enhanced networking with ENA is enabled. EnaSupport *AttributeBooleanValue `locationName:"enaSupport" type:"structure"` - // To enable the instance for AWS Nitro Enclaves, set this parameter to true; - // otherwise, set it to false. + // To enable the instance for Amazon Web Services Nitro Enclaves, set this parameter + // to true; otherwise, set it to false. EnclaveOptions *EnclaveOptions `locationName:"enclaveOptions" type:"structure"` // The security groups associated with the instance. 
@@ -69320,7 +69356,7 @@ type DescribeInstancesInput struct { // * network-interface.requester-id - The requester ID for the network interface. // // * network-interface.requester-managed - Indicates whether the network - // interface is being managed by AWS. + // interface is being managed by Amazon Web Services. // // * network-interface.status - The status of the network interface (available) // | in-use). @@ -69337,7 +69373,7 @@ type DescribeInstancesInput struct { // // * outpost-arn - The Amazon Resource Name (ARN) of the Outpost. // - // * owner-id - The AWS account ID of the instance owner. + // * owner-id - The account ID of the instance owner. // // * placement-group-name - The name of the placement group for the instance. // @@ -69362,7 +69398,7 @@ type DescribeInstancesInput struct { // Similar to the state-reason-code filter. // // * requester-id - The ID of the entity that launched the instance on your - // behalf (for example, AWS Management Console, Auto Scaling, and so on). + // behalf (for example, Management Console, Auto Scaling, and so on). // // * reservation-id - The ID of the instance's reservation. A reservation // ID is created any time you launch an instance. A reservation ID has a @@ -73015,8 +73051,8 @@ type DescribeReservedInstancesOfferingsInput struct { // // * marketplace - Set to true to show only Reserved Instance Marketplace // offerings. When this filter is not used, which is the default behavior, - // all offerings from both AWS and the Reserved Instance Marketplace are - // listed. + // all offerings from both Amazon Web Services and the Reserved Instance + // Marketplace are listed. // // * product-description - The Reserved Instance product platform description. // Instances that include (Amazon VPC) in the product platform description @@ -83069,12 +83105,13 @@ func (s *EnableVpcClassicLinkOutput) SetReturn(v bool) *EnableVpcClassicLinkOutp return s } -// Indicates whether the instance is enabled for AWS Nitro Enclaves. 
+// Indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves. type EnclaveOptions struct { _ struct{} `type:"structure"` - // If this parameter is set to true, the instance is enabled for AWS Nitro Enclaves; - // otherwise, it is not enabled for AWS Nitro Enclaves. + // If this parameter is set to true, the instance is enabled for Amazon Web + // Services Nitro Enclaves; otherwise, it is not enabled for Amazon Web Services + // Nitro Enclaves. Enabled *bool `locationName:"enabled" type:"boolean"` } @@ -83094,13 +83131,14 @@ func (s *EnclaveOptions) SetEnabled(v bool) *EnclaveOptions { return s } -// Indicates whether the instance is enabled for AWS Nitro Enclaves. For more -// information, see What is AWS Nitro Enclaves? (https://docs.aws.amazon.com/enclaves/latest/user/nitro-enclave.html) -// in the AWS Nitro Enclaves User Guide. +// Indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves. +// For more information, see What is Amazon Web Services Nitro Enclaves? (https://docs.aws.amazon.com/enclaves/latest/user/nitro-enclave.html) +// in the Amazon Web Services Nitro Enclaves User Guide. type EnclaveOptionsRequest struct { _ struct{} `type:"structure"` - // To enable the instance for AWS Nitro Enclaves, set this parameter to true. + // To enable the instance for Amazon Web Services Nitro Enclaves, set this parameter + // to true. Enabled *bool `type:"boolean"` } @@ -84660,6 +84698,9 @@ type FleetData struct { // Constraints: Maximum 64 ASCII characters ClientToken *string `locationName:"clientToken" type:"string"` + // Reserved. + Context *string `locationName:"context" type:"string"` + // The creation date and time of the EC2 Fleet. CreateTime *time.Time `locationName:"createTime" type:"timestamp"` @@ -84760,6 +84801,12 @@ func (s *FleetData) SetClientToken(v string) *FleetData { return s } +// SetContext sets the Context field's value. 
+func (s *FleetData) SetContext(v string) *FleetData { + s.Context = &v + return s +} + // SetCreateTime sets the CreateTime field's value. func (s *FleetData) SetCreateTime(v time.Time) *FleetData { s.CreateTime = &v @@ -88634,7 +88681,7 @@ type Host struct { // is true, the host is in a host resource group; otherwise, it is not. MemberOfServiceLinkedResourceGroup *bool `locationName:"memberOfServiceLinkedResourceGroup" type:"boolean"` - // The ID of the AWS account that owns the Dedicated Host. + // The ID of the account that owns the Dedicated Host. OwnerId *string `locationName:"ownerId" type:"string"` // The time that the Dedicated Host was released. @@ -88769,7 +88816,7 @@ type HostInstance struct { // The instance type (for example, m3.medium) of the running instance. InstanceType *string `locationName:"instanceType" type:"string"` - // The ID of the AWS account that owns the instance. + // The ID of the account that owns the instance. OwnerId *string `locationName:"ownerId" type:"string"` } @@ -91335,7 +91382,7 @@ type Instance struct { // Specifies whether enhanced networking with ENA is enabled. EnaSupport *bool `locationName:"enaSupport" type:"boolean"` - // Indicates whether the instance is enabled for AWS Nitro Enclaves. + // Indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves. EnclaveOptions *EnclaveOptions `locationName:"enclaveOptions" type:"structure"` // Indicates whether the instance is enabled for hibernation. @@ -93577,10 +93624,10 @@ func (s *InstanceTypeOffering) SetLocationType(v string) *InstanceTypeOffering { type InstanceUsage struct { _ struct{} `type:"structure"` - // The ID of the AWS account that is making use of the Capacity Reservation. + // The ID of the account that is making use of the Capacity Reservation. AccountId *string `locationName:"accountId" type:"string"` - // The number of instances the AWS account currently has in the Capacity Reservation. 
+ // The number of instances the account currently has in the Capacity Reservation. UsedInstanceCount *int64 `locationName:"usedInstanceCount" type:"integer"` } @@ -97988,6 +98035,9 @@ func (s *ModifyEbsDefaultKmsKeyIdOutput) SetKmsKeyId(v string) *ModifyEbsDefault type ModifyFleetInput struct { _ struct{} `type:"structure"` + // Reserved. + Context *string `type:"string"` + // Checks whether you have the required permissions for the action, without // actually making the request, and provides an error response. If you have // the required permissions, the error response is DryRunOperation. Otherwise, @@ -98049,6 +98099,12 @@ func (s *ModifyFleetInput) Validate() error { return nil } +// SetContext sets the Context field's value. +func (s *ModifyFleetInput) SetContext(v string) *ModifyFleetInput { + s.Context = &v + return s +} + // SetDryRun sets the DryRun field's value. func (s *ModifyFleetInput) SetDryRun(v bool) *ModifyFleetInput { s.DryRun = &v @@ -98771,9 +98827,9 @@ type ModifyInstanceAttributeInput struct { SriovNetSupport *AttributeValue `locationName:"sriovNetSupport" type:"structure"` // Changes the instance's user data to the specified value. If you are using - // an AWS SDK or command line tool, base64-encoding is performed for you, and - // you can load the text from a file. Otherwise, you must provide base64-encoded - // text. + // an Amazon Web Services SDK or command line tool, base64-encoding is performed + // for you, and you can load the text from a file. Otherwise, you must provide + // base64-encoded text. UserData *BlobAttributeValue `locationName:"userData" type:"structure"` // A new value for the attribute. Use only with the kernel, ramdisk, userData, @@ -99989,6 +100045,9 @@ func (s ModifySnapshotAttributeOutput) GoString() string { type ModifySpotFleetRequestInput struct { _ struct{} `type:"structure"` + // Reserved. 
+ Context *string `type:"string"` + // Indicates whether running Spot Instances should be terminated if the target // capacity of the Spot Fleet request is decreased below the current size of // the Spot Fleet. @@ -100045,6 +100104,12 @@ func (s *ModifySpotFleetRequestInput) Validate() error { return nil } +// SetContext sets the Context field's value. +func (s *ModifySpotFleetRequestInput) SetContext(v string) *ModifySpotFleetRequestInput { + s.Context = &v + return s +} + // SetExcessCapacityTerminationPolicy sets the ExcessCapacityTerminationPolicy field's value. func (s *ModifySpotFleetRequestInput) SetExcessCapacityTerminationPolicy(v string) *ModifySpotFleetRequestInput { s.ExcessCapacityTerminationPolicy = &v @@ -106713,7 +106778,10 @@ func (s *PurchaseReservedInstancesOfferingInput) SetReservedInstancesOfferingId( type PurchaseReservedInstancesOfferingOutput struct { _ struct{} `type:"structure"` - // The IDs of the purchased Reserved Instances. + // The IDs of the purchased Reserved Instances. If your purchase crosses into + // a discounted pricing tier, the final Reserved Instances IDs might change. + // For more information, see Crossing pricing tiers (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/concepts-reserved-instances-application.html#crossing-pricing-tiers) + // in the Amazon Elastic Compute Cloud User Guide. ReservedInstancesId *string `locationName:"reservedInstancesId" type:"string"` } @@ -109405,20 +109473,7 @@ type RequestSpotInstancesInput struct { // Default: Instances are launched in any available Availability Zone. AvailabilityZoneGroup *string `locationName:"availabilityZoneGroup" type:"string"` - // The required duration for the Spot Instances (also known as Spot blocks), - // in minutes. This value must be a multiple of 60 (60, 120, 180, 240, 300, - // or 360). - // - // The duration period starts as soon as your Spot Instance receives its instance - // ID. 
At the end of the duration period, Amazon EC2 marks the Spot Instance - // for termination and provides a Spot Instance termination notice, which gives - // the instance a two-minute warning before it terminates. - // - // You can't specify an Availability Zone group or a launch group if you specify - // a duration. - // - // New accounts or accounts with no previous billing history with AWS are not - // eligible for Spot Instances with a defined duration (also known as Spot blocks). + // Deprecated. BlockDurationMinutes *int64 `locationName:"blockDurationMinutes" type:"integer"` // Unique, case-sensitive identifier that you provide to ensure the idempotency @@ -109816,11 +109871,11 @@ type Reservation struct { // The instances. Instances []*Instance `locationName:"instancesSet" locationNameList:"item" type:"list"` - // The ID of the AWS account that owns the reservation. + // The ID of the account that owns the reservation. OwnerId *string `locationName:"ownerId" type:"string"` // The ID of the requester that launched the instances on your behalf (for example, - // AWS Management Console or Auto Scaling). + // Management Console or Auto Scaling). RequesterId *string `locationName:"requesterId" type:"string"` // The ID of the reservation. @@ -110509,8 +110564,8 @@ type ReservedInstancesOffering struct { InstanceType *string `locationName:"instanceType" type:"string" enum:"InstanceType"` // Indicates whether the offering is available through the Reserved Instance - // Marketplace (resale) or AWS. If it's a Reserved Instance Marketplace offering, - // this is true. + // Marketplace (resale) or Amazon Web Services. If it's a Reserved Instance + // Marketplace offering, this is true. Marketplace *bool `locationName:"marketplace" type:"boolean"` // If convertible it can be exchanged for Reserved Instances of the same or @@ -112486,18 +112541,20 @@ type RunInstancesInput struct { // You cannot specify accelerators from different generations in the same request. 
ElasticInferenceAccelerators []*ElasticInferenceAccelerator `locationName:"ElasticInferenceAccelerator" locationNameList:"item" type:"list"` - // Indicates whether the instance is enabled for AWS Nitro Enclaves. For more - // information, see What is AWS Nitro Enclaves? (https://docs.aws.amazon.com/enclaves/latest/user/nitro-enclave.html) - // in the AWS Nitro Enclaves User Guide. + // Indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves. + // For more information, see What is Amazon Web Services Nitro Enclaves? (https://docs.aws.amazon.com/enclaves/latest/user/nitro-enclave.html) + // in the Amazon Web Services Nitro Enclaves User Guide. // - // You can't enable AWS Nitro Enclaves and hibernation on the same instance. + // You can't enable Amazon Web Services Nitro Enclaves and hibernation on the + // same instance. EnclaveOptions *EnclaveOptionsRequest `type:"structure"` // Indicates whether an instance is enabled for hibernation. For more information, // see Hibernate your instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html) // in the Amazon EC2 User Guide. // - // You can't enable hibernation and AWS Nitro Enclaves on the same instance. + // You can't enable hibernation and Amazon Web Services Nitro Enclaves on the + // same instance. HibernationOptions *HibernationOptionsRequest `type:"structure"` // The name or Amazon Resource Name (ARN) of an IAM instance profile. @@ -112619,8 +112676,8 @@ type RunInstancesInput struct { // The ID of the RAM disk to select. Some kernels require additional drivers // at launch. Check the kernel requirements for information about whether you - // need to specify a RAM disk. To find kernel requirements, go to the AWS Resource - // Center and search for the kernel ID. + // need to specify a RAM disk. To find kernel requirements, go to the Amazon + // Web Services Resource Center and search for the kernel ID. 
// // We recommend that you use PV-GRUB instead of kernels and RAM disks. For more // information, see PV-GRUB (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UserProvidedkernels.html) @@ -115809,7 +115866,7 @@ type SpotDatafeedSubscription struct { // The fault codes for the Spot Instance request, if any. Fault *SpotInstanceStateFault `locationName:"fault" type:"structure"` - // The AWS account ID of the account. + // The account ID of the account. OwnerId *string `locationName:"ownerId" type:"string"` // The prefix for the data feed files. @@ -115915,8 +115972,8 @@ type SpotFleetLaunchSpecification struct { // The ID of the RAM disk. Some kernels require additional drivers at launch. // Check the kernel requirements for information about whether you need to specify - // a RAM disk. To find kernel requirements, refer to the AWS Resource Center - // and search for the kernel ID. + // a RAM disk. To find kernel requirements, refer to the Amazon Web Services + // Resource Center and search for the kernel ID. RamdiskId *string `locationName:"ramdiskId" type:"string"` // One or more security groups. When requesting instances in a VPC, you must @@ -116199,6 +116256,9 @@ type SpotFleetRequestConfigData struct { // see Ensuring Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). ClientToken *string `locationName:"clientToken" type:"string"` + // Reserved. + Context *string `locationName:"context" type:"string"` + // Indicates whether running Spot Instances should be terminated if you decrease // the target capacity of the Spot Fleet request below the current size of the // Spot Fleet. @@ -116208,7 +116268,7 @@ type SpotFleetRequestConfigData struct { // capacity. You cannot set this value. 
FulfilledCapacity *float64 `locationName:"fulfilledCapacity" type:"double"` - // The Amazon Resource Name (ARN) of an AWS Identity and Access Management (IAM) + // The Amazon Resource Name (ARN) of an Identity and Access Management (IAM) // role that grants the Spot Fleet the permission to request, launch, terminate, // and tag instances on your behalf. For more information, see Spot Fleet prerequisites // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-fleet-requests.html#spot-fleet-prerequisites) @@ -116227,6 +116287,15 @@ type SpotFleetRequestConfigData struct { // Valid only when Spot AllocationStrategy is set to lowest-price. Spot Fleet // selects the cheapest Spot pools and evenly allocates your target Spot capacity // across the number of Spot pools that you specify. + // + // Note that Spot Fleet attempts to draw Spot Instances from the number of pools + // that you specify on a best effort basis. If a pool runs out of Spot capacity + // before fulfilling your target capacity, Spot Fleet will continue to fulfill + // your request by drawing from the next cheapest pool. To ensure that your + // target capacity is met, you might receive Spot Instances from more than the + // number of pools that you specified. Similarly, if most of the pools have + // no Spot capacity, you might receive your full target capacity from fewer + // than the number of pools that you specified. InstancePoolsToUseCount *int64 `locationName:"instancePoolsToUseCount" type:"integer"` // The launch specifications for the Spot Fleet request. If you specify LaunchSpecifications, @@ -116395,6 +116464,12 @@ func (s *SpotFleetRequestConfigData) SetClientToken(v string) *SpotFleetRequestC return s } +// SetContext sets the Context field's value. +func (s *SpotFleetRequestConfigData) SetContext(v string) *SpotFleetRequestConfigData { + s.Context = &v + return s +} + // SetExcessCapacityTerminationPolicy sets the ExcessCapacityTerminationPolicy field's value. 
func (s *SpotFleetRequestConfigData) SetExcessCapacityTerminationPolicy(v string) *SpotFleetRequestConfigData { s.ExcessCapacityTerminationPolicy = &v @@ -116566,8 +116641,7 @@ func (s *SpotFleetTagSpecification) SetTags(v []*Tag) *SpotFleetTagSpecification type SpotInstanceRequest struct { _ struct{} `type:"structure"` - // If you specified a duration and your Spot Instance request was fulfilled, - // this is the fixed hourly price in effect for the Spot Instance while it runs. + // Deprecated. ActualBlockHourlyPrice *string `locationName:"actualBlockHourlyPrice" type:"string"` // The Availability Zone group. If you specify the same Availability Zone group @@ -116575,7 +116649,7 @@ type SpotInstanceRequest struct { // Availability Zone. AvailabilityZoneGroup *string `locationName:"availabilityZoneGroup" type:"string"` - // The duration for the Spot Instance, in minutes. + // Deprecated. BlockDurationMinutes *int64 `locationName:"blockDurationMinutes" type:"integer"` // The date and time when the Spot Instance request was created, in UTC format @@ -116885,8 +116959,9 @@ type SpotMarketOptions struct { // You can't specify an Availability Zone group or a launch group if you specify // a duration. // - // New accounts or accounts with no previous billing history with AWS are not - // eligible for Spot Instances with a defined duration (also known as Spot blocks). + // New accounts or accounts with no previous billing history with Amazon Web + // Services are not eligible for Spot Instances with a defined duration (also + // known as Spot blocks). BlockDurationMinutes *int64 `type:"integer"` // The behavior when a Spot Instance is interrupted. The default is terminate. @@ -116986,6 +117061,15 @@ type SpotOptions struct { // Valid only when AllocationStrategy is set to lowest-price. EC2 Fleet selects // the cheapest Spot pools and evenly allocates your target Spot capacity across // the number of Spot pools that you specify. 
+ // + // Note that EC2 Fleet attempts to draw Spot Instances from the number of pools + // that you specify on a best effort basis. If a pool runs out of Spot capacity + // before fulfilling your target capacity, EC2 Fleet will continue to fulfill + // your request by drawing from the next cheapest pool. To ensure that your + // target capacity is met, you might receive Spot Instances from more than the + // number of pools that you specified. Similarly, if most of the pools have + // no Spot capacity, you might receive your full target capacity from fewer + // than the number of pools that you specified. InstancePoolsToUseCount *int64 `locationName:"instancePoolsToUseCount" type:"integer"` // The strategies for managing your workloads on your Spot Instances that will @@ -117099,6 +117183,15 @@ type SpotOptionsRequest struct { // Valid only when Spot AllocationStrategy is set to lowest-price. EC2 Fleet // selects the cheapest Spot pools and evenly allocates your target Spot capacity // across the number of Spot pools that you specify. + // + // Note that EC2 Fleet attempts to draw Spot Instances from the number of pools + // that you specify on a best effort basis. If a pool runs out of Spot capacity + // before fulfilling your target capacity, EC2 Fleet will continue to fulfill + // your request by drawing from the next cheapest pool. To ensure that your + // target capacity is met, you might receive Spot Instances from more than the + // number of pools that you specified. Similarly, if most of the pools have + // no Spot capacity, you might receive your full target capacity from fewer + // than the number of pools that you specified. 
InstancePoolsToUseCount *int64 `type:"integer"` // The strategies for managing your Spot Instances that are at an elevated risk @@ -130001,6 +130094,9 @@ const ( // ResourceTypeNetworkInsightsAnalysis is a ResourceType enum value ResourceTypeNetworkInsightsAnalysis = "network-insights-analysis" + // ResourceTypeNetworkInsightsBoundary is a ResourceType enum value + ResourceTypeNetworkInsightsBoundary = "network-insights-boundary" + // ResourceTypeNetworkInsightsPath is a ResourceType enum value ResourceTypeNetworkInsightsPath = "network-insights-path" @@ -130098,6 +130194,7 @@ func ResourceType_Values() []string { ResourceTypeNetworkAcl, ResourceTypeNetworkInterface, ResourceTypeNetworkInsightsAnalysis, + ResourceTypeNetworkInsightsBoundary, ResourceTypeNetworkInsightsPath, ResourceTypePlacementGroup, ResourceTypeReservedInstances, diff --git a/service/sagemaker/api.go b/service/sagemaker/api.go index e90658ada42..e07d7ed3351 100644 --- a/service/sagemaker/api.go +++ b/service/sagemaker/api.go @@ -150,7 +150,7 @@ func (c *SageMaker) AddTagsRequest(input *AddTagsInput) (req *request.Request, o // // Each tag consists of a key and an optional value. Tag keys must be unique // per resource. For more information about tags, see For more information, -// see AWS Tagging Strategies (https://aws.amazon.com/answers/account-management/aws-tagging-strategies/). +// see Amazon Web Services Tagging Strategies (https://aws.amazon.com/answers/account-management/aws-tagging-strategies/). // // Tags that you add to a hyperparameter tuning job by calling this API are // also added to any training jobs that the hyperparameter tuning job launches @@ -415,7 +415,7 @@ func (c *SageMaker) CreateAlgorithmRequest(input *CreateAlgorithmInput) (req *re // CreateAlgorithm API operation for Amazon SageMaker Service. // // Create a machine learning algorithm that you can use in Amazon SageMaker -// and list in the AWS Marketplace. +// and list in the Amazon Web Services Marketplace. 
// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -839,7 +839,7 @@ func (c *SageMaker) CreateCodeRepositoryRequest(input *CreateCodeRepositoryInput // more than one notebook instance, and it persists independently from the lifecycle // of any notebook instances it is associated with. // -// The repository can be hosted either in AWS CodeCommit (https://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html) +// The repository can be hosted either in Amazon Web Services CodeCommit (https://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html) // or in any other Git repository. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -920,8 +920,8 @@ func (c *SageMaker) CreateCompilationJobRequest(input *CreateCompilationJobInput // // If you choose to host your model using Amazon SageMaker hosting services, // you can use the resulting model artifacts as part of the model. You can also -// use the artifacts with AWS IoT Greengrass. In that case, deploy them as an -// ML resource. +// use the artifacts with Amazon Web Services IoT Greengrass. In that case, +// deploy them as an ML resource. // // In the request body, you provide the following: // @@ -1283,9 +1283,9 @@ func (c *SageMaker) CreateDomainRequest(input *CreateDomainInput) (req *request. // Creates a Domain used by Amazon SageMaker Studio. A domain consists of an // associated Amazon Elastic File System (EFS) volume, a list of authorized // users, and a variety of security, application, policy, and Amazon Virtual -// Private Cloud (VPC) configurations. An AWS account is limited to one domain -// per region. Users within a domain can share notebook files and other artifacts -// with each other. +// Private Cloud (VPC) configurations. An Amazon Web Services account is limited +// to one domain per region. 
Users within a domain can share notebook files +// and other artifacts with each other. // // EFS storage // @@ -1293,10 +1293,11 @@ func (c *SageMaker) CreateDomainRequest(input *CreateDomainInput) (req *request. // users within the domain. Each user receives a private home directory within // the EFS volume for notebooks, Git repositories, and data files. // -// SageMaker uses the AWS Key Management Service (AWS KMS) to encrypt the EFS -// volume attached to the domain with an AWS managed customer master key (CMK) -// by default. For more control, you can specify a customer managed CMK. For -// more information, see Protect Data at Rest Using Encryption (https://docs.aws.amazon.com/sagemaker/latest/dg/encryption-at-rest.html). +// SageMaker uses the Amazon Web Services Key Management Service (Amazon Web +// Services KMS) to encrypt the EFS volume attached to the domain with an Amazon +// Web Services managed customer master key (CMK) by default. For more control, +// you can specify a customer managed CMK. For more information, see Protect +// Data at Rest Using Encryption (https://docs.aws.amazon.com/sagemaker/latest/dg/encryption-at-rest.html). // // VPC configuration // @@ -1316,6 +1317,9 @@ func (c *SageMaker) CreateDomainRequest(input *CreateDomainInput) (req *request. // VPC has an interface endpoint to the SageMaker API and runtime or a NAT // gateway and your security groups allow outbound connections. // +// NFS traffic over TCP on port 2049 needs to be allowed in both inbound and +// outbound rules in order to launch a SageMaker Studio app successfully. +// // For more information, see Connect SageMaker Studio Notebooks to Resources // in a VPC (https://docs.aws.amazon.com/sagemaker/latest/dg/studio-notebooks-and-internet-access.html). 
// @@ -1492,13 +1496,14 @@ func (c *SageMaker) CreateEndpointRequest(input *CreateEndpointInput) (req *requ // // For an example that calls this method when deploying a model to Amazon SageMaker // hosting services, see Deploy the Model to Amazon SageMaker Hosting Services -// (AWS SDK for Python (Boto 3)). (https://docs.aws.amazon.com/sagemaker/latest/dg/ex1-deploy-model.html#ex1-deploy-model-boto) +// (Amazon Web Services SDK for Python (Boto 3)). (https://docs.aws.amazon.com/sagemaker/latest/dg/ex1-deploy-model.html#ex1-deploy-model-boto) // // You must not delete an EndpointConfig that is in use by an endpoint that // is live or while the UpdateEndpoint or CreateEndpoint operations are being // performed on the endpoint. To update an endpoint, you must create a new EndpointConfig. // -// The endpoint name must be unique within an AWS Region in your AWS account. +// The endpoint name must be unique within an Amazon Web Services Region in +// your Amazon Web Services account. // // When it receives the request, Amazon SageMaker creates the endpoint, launches // the resources (ML compute instances), and deploys the model(s) on them. @@ -1520,12 +1525,13 @@ func (c *SageMaker) CreateEndpointRequest(input *CreateEndpointInput) (req *requ // the status of an endpoint, use the DescribeEndpoint API. // // If any of the models hosted at this endpoint get model data from an Amazon -// S3 location, Amazon SageMaker uses AWS Security Token Service to download -// model artifacts from the S3 path you provided. AWS STS is activated in your -// IAM user account by default. If you previously deactivated AWS STS for a -// region, you need to reactivate AWS STS for that region. For more information, -// see Activating and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) -// in the AWS Identity and Access Management User Guide. 
+// S3 location, Amazon SageMaker uses Amazon Web Services Security Token Service +// to download model artifacts from the S3 path you provided. Amazon Web Services +// STS is activated in your IAM user account by default. If you previously deactivated +// Amazon Web Services STS for a region, you need to reactivate Amazon Web Services +// STS for that region. For more information, see Activating and Deactivating +// Amazon Web Services STS in an Amazon Web Services Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the Amazon Web Services Identity and Access Management User Guide. // // To add the IAM role policies for using this API operation, go to the IAM // console (https://console.aws.amazon.com/iam/), and choose Roles in the left @@ -1642,7 +1648,7 @@ func (c *SageMaker) CreateEndpointConfigRequest(input *CreateEndpointConfigInput // // For an example that calls this method when deploying a model to Amazon SageMaker // hosting services, see Deploy the Model to Amazon SageMaker Hosting Services -// (AWS SDK for Python (Boto 3)). (https://docs.aws.amazon.com/sagemaker/latest/dg/ex1-deploy-model.html#ex1-deploy-model-boto) +// (Amazon Web Services SDK for Python (Boto 3)). (https://docs.aws.amazon.com/sagemaker/latest/dg/ex1-deploy-model.html#ex1-deploy-model-boto) // // When you call CreateEndpoint, a load call is made to DynamoDB to verify that // your endpoint configuration exists. When you read data from a DynamoDB table @@ -1744,8 +1750,8 @@ func (c *SageMaker) CreateExperimentRequest(input *CreateExperimentInput) (req * // // When you use SageMaker Studio or the SageMaker Python SDK, all experiments, // trials, and trial components are automatically tracked, logged, and indexed. -// When you use the AWS SDK for Python (Boto), you must use the logging APIs -// provided by the SDK. +// When you use the Amazon Web Services SDK for Python (Boto), you must use +// the logging APIs provided by the SDK. 
// // You can add tags to experiments, trials, trial components and then use the // Search API to search for the tags. @@ -1843,8 +1849,8 @@ func (c *SageMaker) CreateFeatureGroupRequest(input *CreateFeatureGroupInput) (r // The FeatureGroup defines the schema and features contained in the FeatureGroup. // A FeatureGroup definition is composed of a list of Features, a RecordIdentifierFeatureName, // an EventTimeFeatureName and configurations for its OnlineStore and OfflineStore. -// Check AWS service quotas (https://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html) -// to see the FeatureGroups quota for your AWS account. +// Check Amazon Web Services service quotas (https://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html) +// to see the FeatureGroups quota for your Amazon Web Services account. // // You must include at least one of OnlineStoreConfig and OfflineStoreConfig // to create a FeatureGroup. @@ -2368,8 +2374,8 @@ func (c *SageMaker) CreateLabelingJobRequest(input *CreateLabelingJobInput) (req // and outside experts. Use a private workforce when want the data to stay // within your organization or when a specific set of skills is required. // -// * One or more vendors that you select from the AWS Marketplace. Vendors -// provide expertise in specific areas. +// * One or more vendors that you select from the Amazon Web Services Marketplace. +// Vendors provide expertise in specific areas. // // * The Amazon Mechanical Turk workforce. This is the largest workforce, // but it should only be used for public data or data that has been stripped @@ -2495,7 +2501,7 @@ func (c *SageMaker) CreateModelRequest(input *CreateModelInput) (req *request.Re // // For an example that calls this method when deploying a model to Amazon SageMaker // hosting services, see Deploy the Model to Amazon SageMaker Hosting Services -// (AWS SDK for Python (Boto 3)). 
(https://docs.aws.amazon.com/sagemaker/latest/dg/ex1-deploy-model.html#ex1-deploy-model-boto) +// (Amazon Web Services SDK for Python (Boto 3)). (https://docs.aws.amazon.com/sagemaker/latest/dg/ex1-deploy-model.html#ex1-deploy-model-boto) // // To run a batch transform using your model, you start a job with the CreateTransformJob // API. Amazon SageMaker uses your model and your dataset to get inferences @@ -2508,8 +2514,8 @@ func (c *SageMaker) CreateModelRequest(input *CreateModelInput) (req *request.Re // to access model artifacts and docker image for deployment on ML compute hosting // instances or for batch transform jobs. In addition, you also use the IAM // role to manage permissions the inference code needs. For example, if the -// inference code access any other AWS resources, you grant necessary permissions -// via this role. +// inference code access any other Amazon Web Services resources, you grant +// necessary permissions via this role. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2756,15 +2762,15 @@ func (c *SageMaker) CreateModelPackageRequest(input *CreateModelPackageInput) (r // CreateModelPackage API operation for Amazon SageMaker Service. // // Creates a model package that you can use to create Amazon SageMaker models -// or list on AWS Marketplace, or a versioned model that is part of a model -// group. Buyers can subscribe to model packages listed on AWS Marketplace to -// create models in Amazon SageMaker. +// or list on Amazon Web Services Marketplace, or a versioned model that is +// part of a model group. Buyers can subscribe to model packages listed on Amazon +// Web Services Marketplace to create models in Amazon SageMaker. 
// // To create a model package by specifying a Docker container that contains // your inference code and the Amazon S3 location of your model artifacts, provide // values for InferenceSpecification. To create a model from an algorithm resource -// that you created or subscribed to in AWS Marketplace, provide a value for -// SourceAlgorithmSpecification. +// that you created or subscribed to in Amazon Web Services Marketplace, provide +// a value for SourceAlgorithmSpecification. // // There are two types of model packages: // @@ -3405,10 +3411,21 @@ func (c *SageMaker) CreatePresignedDomainUrlRequest(input *CreatePresignedDomain // Domain's Amazon Elastic File System (EFS) volume. This operation can only // be called when the authentication mode equals IAM. // +// The IAM role or user used to call this API defines the permissions to access +// the app. Once the presigned URL is created, no additional permission is required +// to access this URL. IAM authorization policies for this API are also enforced +// for every HTTP request and WebSocket frame that attempts to connect to the +// app. +// +// You can restrict access to this API and to the URL that it returns to a list +// of IP addresses, Amazon VPCs or Amazon VPC Endpoints that you specify. For +// more information, see Connect to SageMaker Studio Through an Interface VPC +// Endpoint (https://docs.aws.amazon.com/sagemaker/latest/dg/studio-interface-endpoint.html) . +// // The URL that you get from a call to CreatePresignedDomainUrl has a default // timeout of 5 minutes. You can configure this value using ExpiresInSeconds. // If you try to use the URL after the timeout limit expires, you are directed -// to the AWS console sign-in page. +// to the Amazon Web Services console sign-in page. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3507,7 +3524,7 @@ func (c *SageMaker) CreatePresignedNotebookInstanceUrlRequest(input *CreatePresi // // The URL that you get from a call to CreatePresignedNotebookInstanceUrl is // valid only for 5 minutes. If you try to use the URL after the 5-minute limit -// expires, you are directed to the AWS console sign-in page. +// expires, you are directed to the Amazon Web Services console sign-in page. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3889,11 +3906,12 @@ func (c *SageMaker) CreateTransformJobRequest(input *CreateTransformJobInput) (r // In the request body, you provide the following: // // * TransformJobName - Identifies the transform job. The name must be unique -// within an AWS Region in an AWS account. +// within an Amazon Web Services Region in an Amazon Web Services account. // // * ModelName - Identifies the model to use. ModelName must be the name -// of an existing Amazon SageMaker model in the same AWS Region and AWS account. -// For information on creating a model, see CreateModel. +// of an existing Amazon SageMaker model in the same Amazon Web Services +// Region and Amazon Web Services account. For information on creating a +// model, see CreateModel. // // * TransformInput - Describes the dataset to be transformed and the Amazon // S3 location where it is stored. @@ -3997,8 +4015,8 @@ func (c *SageMaker) CreateTrialRequest(input *CreateTrialInput) (req *request.Re // // When you use SageMaker Studio or the SageMaker Python SDK, all experiments, // trials, and trial components are automatically tracked, logged, and indexed. -// When you use the AWS SDK for Python (Boto), you must use the logging APIs -// provided by the SDK. 
+// When you use the Amazon Web Services SDK for Python (Boto), you must use +// the logging APIs provided by the SDK. // // You can add tags to a trial and then use the Search API to search for the // tags. @@ -4097,17 +4115,12 @@ func (c *SageMaker) CreateTrialComponentRequest(input *CreateTrialComponentInput // // When you use SageMaker Studio or the SageMaker Python SDK, all experiments, // trials, and trial components are automatically tracked, logged, and indexed. -// When you use the AWS SDK for Python (Boto), you must use the logging APIs -// provided by the SDK. +// When you use the Amazon Web Services SDK for Python (Boto), you must use +// the logging APIs provided by the SDK. // // You can add tags to a trial component and then use the Search API to search // for the tags. // -// CreateTrialComponent can only be invoked from within an SageMaker managed -// environment. This includes SageMaker training jobs, processing jobs, transform -// jobs, and SageMaker notebooks. A call to CreateTrialComponent from outside -// one of these environments results in an error. -// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -4277,12 +4290,13 @@ func (c *SageMaker) CreateWorkforceRequest(input *CreateWorkforceInput) (req *re // CreateWorkforce API operation for Amazon SageMaker Service. // // Use this operation to create a workforce. This operation will return an error -// if a workforce already exists in the AWS Region that you specify. You can -// only create one workforce in each AWS Region per AWS account. +// if a workforce already exists in the Amazon Web Services Region that you +// specify. You can only create one workforce in each Amazon Web Services Region +// per Amazon Web Services account. 
// -// If you want to create a new workforce in an AWS Region where a workforce -// already exists, use the API operation to delete the existing workforce and -// then use CreateWorkforce to create a new workforce. +// If you want to create a new workforce in an Amazon Web Services Region where +// a workforce already exists, use the API operation to delete the existing +// workforce and then use CreateWorkforce to create a new workforce. // // To create a private workforce using Amazon Cognito, you must specify a Cognito // user pool in CognitoConfig. You can also create an Amazon Cognito workforce @@ -5578,8 +5592,9 @@ func (c *SageMaker) DeleteFeatureGroupRequest(input *DeleteFeatureGroupInput) (r // of the FeatureGroup. Data cannot be accessed from the OnlineStore immediately // after DeleteFeatureGroup is called. // -// Data written into the OfflineStore will not be deleted. The AWS Glue database -// and tables that are automatically created for your OfflineStore are not deleted. +// Data written into the OfflineStore will not be deleted. The Amazon Web Services +// Glue database and tables that are automatically created for your OfflineStore +// are not deleted. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -6234,9 +6249,9 @@ func (c *SageMaker) DeleteModelPackageRequest(input *DeleteModelPackageInput) (r // // Deletes a model package. // -// A model package is used to create Amazon SageMaker models or list on AWS -// Marketplace. Buyers can subscribe to model packages listed on AWS Marketplace -// to create models in Amazon SageMaker. +// A model package is used to create Amazon SageMaker models or list on Amazon +// Web Services Marketplace. Buyers can subscribe to model packages listed on +// Amazon Web Services Marketplace to create models in Amazon SageMaker. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -6325,6 +6340,12 @@ func (c *SageMaker) DeleteModelPackageGroupRequest(input *DeleteModelPackageGrou // // See the AWS API reference guide for Amazon SageMaker Service's // API operation DeleteModelPackageGroup for usage and error information. +// +// Returned Error Types: +// * ConflictException +// There was a conflict when you attempted to modify a SageMaker entity such +// as an Experiment or Artifact. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DeleteModelPackageGroup func (c *SageMaker) DeleteModelPackageGroup(input *DeleteModelPackageGroupInput) (*DeleteModelPackageGroupOutput, error) { req, out := c.DeleteModelPackageGroupRequest(input) @@ -6874,6 +6895,12 @@ func (c *SageMaker) DeleteProjectRequest(input *DeleteProjectInput) (req *reques // // See the AWS API reference guide for Amazon SageMaker Service's // API operation DeleteProject for usage and error information. +// +// Returned Error Types: +// * ConflictException +// There was a conflict when you attempted to modify a SageMaker entity such +// as an Experiment or Artifact. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DeleteProject func (c *SageMaker) DeleteProject(input *DeleteProjectInput) (*DeleteProjectOutput, error) { req, out := c.DeleteProjectRequest(input) @@ -7274,9 +7301,9 @@ func (c *SageMaker) DeleteWorkforceRequest(input *DeleteWorkforceInput) (req *re // // Use this operation to delete a workforce. // -// If you want to create a new workforce in an AWS Region where a workforce -// already exists, use this operation to delete the existing workforce and then -// use to create a new workforce. +// If you want to create a new workforce in an Amazon Web Services Region where +// a workforce already exists, use this operation to delete the existing workforce +// and then use to create a new workforce. 
// // If a private workforce contains one or more work teams, you must use the // operation to delete all work teams before you delete the workforce. If you @@ -9627,10 +9654,10 @@ func (c *SageMaker) DescribeModelPackageRequest(input *DescribeModelPackageInput // DescribeModelPackage API operation for Amazon SageMaker Service. // // Returns a description of the specified model package, which is used to create -// Amazon SageMaker models or list them on AWS Marketplace. +// Amazon SageMaker models or list them on Amazon Web Services Marketplace. // // To create models in Amazon SageMaker, buyers can subscribe to model packages -// listed on AWS Marketplace. +// listed on Amazon Web Services Marketplace. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -10478,7 +10505,7 @@ func (c *SageMaker) DescribeSubscribedWorkteamRequest(input *DescribeSubscribedW // DescribeSubscribedWorkteam API operation for Amazon SageMaker Service. // // Gets information about a work team provided by a vendor. It returns details -// about the subscription with a vendor in the AWS Marketplace. +// about the subscription with a vendor in the Amazon Web Services Marketplace. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -11423,7 +11450,7 @@ func (c *SageMaker) GetModelPackageGroupPolicyRequest(input *GetModelPackageGrou // Gets a resource policy that manages access for a model group. For information // about resource policies, see Identity-based policies and resource-based policies // (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_identity-vs-resource.html) -// in the AWS Identity and Access Management User Guide.. +// in the Amazon Web Services Identity and Access Management User Guide.. 
// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -15529,7 +15556,7 @@ func (c *SageMaker) ListModelPackageGroupsRequest(input *ListModelPackageGroupsI // ListModelPackageGroups API operation for Amazon SageMaker Service. // -// Gets a list of the model groups in your AWS account. +// Gets a list of the model groups in your Amazon Web Services account. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -16455,7 +16482,7 @@ func (c *SageMaker) ListNotebookInstancesRequest(input *ListNotebookInstancesInp // ListNotebookInstances API operation for Amazon SageMaker Service. // // Returns a list of the Amazon SageMaker notebook instances in the requester's -// account in an AWS Region. +// account in an Amazon Web Services Region. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -17262,7 +17289,7 @@ func (c *SageMaker) ListProjectsRequest(input *ListProjectsInput) (req *request. // ListProjects API operation for Amazon SageMaker Service. // -// Gets a list of the projects in an AWS account. +// Gets a list of the projects in an Amazon Web Services account. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -17394,9 +17421,9 @@ func (c *SageMaker) ListSubscribedWorkteamsRequest(input *ListSubscribedWorkteam // ListSubscribedWorkteams API operation for Amazon SageMaker Service. // -// Gets a list of the work teams that you are subscribed to in the AWS Marketplace. -// The list may be empty if no work team satisfies the filter specified in the -// NameContains parameter. 
+// Gets a list of the work teams that you are subscribed to in the Amazon Web +// Services Marketplace. The list may be empty if no work team satisfies the +// filter specified in the NameContains parameter. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -17675,7 +17702,8 @@ func (c *SageMaker) ListTrainingJobsRequest(input *ListTrainingJobsInput) (req * // are selected (sorted according to the creation time, from the most current // to the oldest). Next, those with a status of InProgress are returned. // -// You can quickly test the API using the following AWS CLI code. +// You can quickly test the API using the following Amazon Web Services CLI +// code. // // aws sagemaker list-training-jobs --max-results 100 --status-equals InProgress // @@ -18498,8 +18526,9 @@ func (c *SageMaker) ListWorkforcesRequest(input *ListWorkforcesInput) (req *requ // ListWorkforces API operation for Amazon SageMaker Service. // -// Use this operation to list all private and vendor workforces in an AWS Region. -// Note that you can only have one private workforce per AWS Region. +// Use this operation to list all private and vendor workforces in an Amazon +// Web Services Region. Note that you can only have one private workforce per +// Amazon Web Services Region. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -18762,7 +18791,7 @@ func (c *SageMaker) PutModelPackageGroupPolicyRequest(input *PutModelPackageGrou // Adds a resouce policy to control access to a model group. For information // about resoure policies, see Identity-based policies and resource-based policies // (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_identity-vs-resource.html) -// in the AWS Identity and Access Management User Guide.. 
+// in the Amazon Web Services Identity and Access Management User Guide.. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -20134,6 +20163,19 @@ func (c *SageMaker) StopPipelineExecutionRequest(input *StopPipelineExecutionInp // // Stops a pipeline execution. // +// A pipeline execution won't stop while a callback step is running. When you +// call StopPipelineExecution on a pipeline execution with a running callback +// step, SageMaker Pipelines sends an additional Amazon SQS message to the specified +// SQS queue. The body of the SQS message contains a "Status" field which is +// set to "Stopping". +// +// You should add logic to your Amazon SQS message consumer to take any needed +// action (for example, resource cleanup) upon receipt of the message followed +// by a call to SendPipelineExecutionStepSuccess or SendPipelineExecutionStepFailure. +// +// Only when SageMaker Pipelines receives one of these calls will it stop the +// pipeline execution. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -22647,9 +22689,10 @@ type AddTagsInput struct { // ResourceArn is a required field ResourceArn *string `type:"string" required:"true"` - // An array of key-value pairs. You can use tags to categorize your AWS resources - // in different ways, for example, by purpose, owner, or environment. For more - // information, see Tagging AWS Resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). + // An array of key-value pairs. You can use tags to categorize your Amazon Web + // Services resources in different ways, for example, by purpose, owner, or + // environment. For more information, see Tagging Amazon Web Services Resources + // (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). 
// // Tags is a required field Tags []*Tag `type:"list" required:"true"` @@ -22809,9 +22852,9 @@ type AlgorithmSpecification struct { _ struct{} `type:"structure"` // The name of the algorithm resource to use for the training job. This must - // be an algorithm resource that you created or subscribe to on AWS Marketplace. - // If you specify a value for this parameter, you can't specify a value for - // TrainingImage. + // be an algorithm resource that you created or subscribe to on Amazon Web Services + // Marketplace. If you specify a value for this parameter, you can't specify + // a value for TrainingImage. AlgorithmName *string `min:"1" type:"string"` // To generate and save time-series metrics during training, set to true. The @@ -23082,7 +23125,7 @@ func (s *AlgorithmSummary) SetCreationTime(v time.Time) *AlgorithmSummary { // to validate your algorithm. // // The data provided in the validation profile is made available to your buyers -// on AWS Marketplace. +// on Amazon Web Services Marketplace. type AlgorithmValidationProfile struct { _ struct{} `type:"structure"` @@ -24479,8 +24522,9 @@ type AthenaDatasetDefinition struct { // Database is a required field Database *string `min:"1" type:"string" required:"true"` - // The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to - // encrypt data generated from an Athena query execution. + // The Amazon Web Services Key Management Service (Amazon Web Services KMS) + // key that Amazon SageMaker uses to encrypt data generated from an Athena query + // execution. KmsKeyId *string `type:"string"` // The compression used for Athena query results. @@ -25308,7 +25352,7 @@ func (s *AutoMLJobSummary) SetPartialFailureReasons(v []*AutoMLPartialFailureRea type AutoMLOutputDataConfig struct { _ struct{} `type:"structure"` - // The AWS KMS encryption key ID. + // The Amazon Web Services KMS encryption key ID. KmsKeyId *string `type:"string"` // The Amazon S3 output path. Must be 128 characters or less. 
@@ -26306,8 +26350,8 @@ type CodeRepositorySummary struct { CreationTime *time.Time `type:"timestamp" required:"true"` // Configuration details for the Git repository, including the URL where it - // is located and the ARN of the AWS Secrets Manager secret that contains the - // credentials used to access the repository. + // is located and the ARN of the Amazon Web Services Secrets Manager secret + // that contains the credentials used to access the repository. GitConfig *GitConfig `type:"structure"` // The date and time that the Git repository was last modified. @@ -26805,13 +26849,14 @@ type ContainerDefinition struct { // The model artifacts must be in an S3 bucket that is in the same region as // the model or endpoint you are creating. // - // If you provide a value for this parameter, Amazon SageMaker uses AWS Security - // Token Service to download model artifacts from the S3 path you provide. AWS - // STS is activated in your IAM user account by default. If you previously deactivated - // AWS STS for a region, you need to reactivate AWS STS for that region. For - // more information, see Activating and Deactivating AWS STS in an AWS Region - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) - // in the AWS Identity and Access Management User Guide. + // If you provide a value for this parameter, Amazon SageMaker uses Amazon Web + // Services Security Token Service to download model artifacts from the S3 path + // you provide. Amazon Web Services STS is activated in your IAM user account + // by default. If you previously deactivated Amazon Web Services STS for a region, + // you need to reactivate Amazon Web Services STS for that region. 
For more + // information, see Activating and Deactivating Amazon Web Services STS in an + // Amazon Web Services Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) + // in the Amazon Web Services Identity and Access Management User Guide. // // If you use a built-in algorithm to create a model, Amazon SageMaker requires // that you provide a S3 path to the model artifacts in ModelDataUrl. @@ -27190,7 +27235,8 @@ func (s *ContinuousParameterRangeSpecification) SetMinValue(v string) *Continuou type CreateActionInput struct { _ struct{} `type:"structure"` - // The name of the action. Must be unique to your account in an AWS Region. + // The name of the action. Must be unique to your account in an Amazon Web Services + // Region. // // ActionName is a required field ActionName *string `min:"1" type:"string" required:"true"` @@ -27350,7 +27396,8 @@ type CreateAlgorithmInput struct { // AlgorithmName is a required field AlgorithmName *string `min:"1" type:"string" required:"true"` - // Whether to certify the algorithm so that it can be listed in AWS Marketplace. + // Whether to certify the algorithm so that it can be listed in Amazon Web Services + // Marketplace. CertifyForMarketplace *bool `type:"boolean"` // Specifies details about inference jobs that the algorithm runs, including @@ -27366,9 +27413,10 @@ type CreateAlgorithmInput struct { // inference. InferenceSpecification *InferenceSpecification `type:"structure"` - // An array of key-value pairs. You can use tags to categorize your AWS resources - // in different ways, for example, by purpose, owner, or environment. For more - // information, see Tagging AWS Resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). + // An array of key-value pairs. You can use tags to categorize your Amazon Web + // Services resources in different ways, for example, by purpose, owner, or + // environment. 
For more information, see Tagging Amazon Web Services Resources + // (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). Tags []*Tag `type:"list"` // Specifies details about training jobs run by this algorithm, including the @@ -27752,7 +27800,8 @@ func (s *CreateAppOutput) SetAppArn(v string) *CreateAppOutput { type CreateArtifactInput struct { _ struct{} `type:"structure"` - // The name of the artifact. Must be unique to your account in an AWS Region. + // The name of the artifact. Must be unique to your account in an Amazon Web + // Services Region. ArtifactName *string `min:"1" type:"string"` // The artifact type. @@ -28109,9 +28158,10 @@ type CreateCodeRepositoryInput struct { // GitConfig is a required field GitConfig *GitConfig `type:"structure" required:"true"` - // An array of key-value pairs. You can use tags to categorize your AWS resources - // in different ways, for example, by purpose, owner, or environment. For more - // information, see Tagging AWS Resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). + // An array of key-value pairs. You can use tags to categorize your Amazon Web + // Services resources in different ways, for example, by purpose, owner, or + // environment. For more information, see Tagging Amazon Web Services Resources + // (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). Tags []*Tag `type:"list"` } @@ -28206,7 +28256,7 @@ type CreateCompilationJobInput struct { _ struct{} `type:"structure"` // A name for the model compilation job. The name must be unique within the - // AWS Region and within your AWS account. + // Amazon Web Services Region and within your Amazon Web Services account. 
// // CompilationJobName is a required field CompilationJobName *string `min:"1" type:"string" required:"true"` @@ -28251,9 +28301,10 @@ type CreateCompilationJobInput struct { // StoppingCondition is a required field StoppingCondition *StoppingCondition `type:"structure" required:"true"` - // An array of key-value pairs. You can use tags to categorize your AWS resources - // in different ways, for example, by purpose, owner, or environment. For more - // information, see Tagging AWS Resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). + // An array of key-value pairs. You can use tags to categorize your Amazon Web + // Services resources in different ways, for example, by purpose, owner, or + // environment. For more information, see Tagging Amazon Web Services Resources + // (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). Tags []*Tag `type:"list"` // A VpcConfig object that specifies the VPC that you want your compilation @@ -28407,7 +28458,8 @@ func (s *CreateCompilationJobOutput) SetCompilationJobArn(v string) *CreateCompi type CreateContextInput struct { _ struct{} `type:"structure"` - // The name of the context. Must be unique to your account in an AWS Region. + // The name of the context. Must be unique to your account in an Amazon Web + // Services Region. // // ContextName is a required field ContextName *string `min:"1" type:"string" required:"true"` @@ -28584,7 +28636,7 @@ type CreateDataQualityJobDefinitionInput struct { // (Optional) An array of key-value pairs. For more information, see Using Cost // Allocation Tags (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html#allocation-whatURL) - // in the AWS Billing and Cost Management User Guide. + // in the Amazon Web Services Billing and Cost Management User Guide. 
Tags []*Tag `type:"list"` } @@ -28773,8 +28825,8 @@ type CreateDeviceFleetInput struct { // DeviceFleetName is a required field DeviceFleetName *string `min:"1" type:"string" required:"true"` - // Whether to create an AWS IoT Role Alias during device fleet creation. The - // name of the role alias generated will match this pattern: "SageMakerEdge-{DeviceFleetName}". + // Whether to create an Amazon Web Services IoT Role Alias during device fleet + // creation. The name of the role alias generated will match this pattern: "SageMakerEdge-{DeviceFleetName}". // // For example, if your device fleet is called "demo-fleet", the name of the // role alias will be "SageMakerEdge-demo-fleet". @@ -28785,8 +28837,8 @@ type CreateDeviceFleetInput struct { // OutputConfig is a required field OutputConfig *EdgeOutputConfig `type:"structure" required:"true"` - // The Amazon Resource Name (ARN) that has access to AWS Internet of Things - // (IoT). + // The Amazon Resource Name (ARN) that has access to Amazon Web Services Internet + // of Things (IoT). RoleArn *string `min:"20" type:"string"` // Creates tags for the specified fleet. @@ -28929,9 +28981,9 @@ type CreateDomainInput struct { // Deprecated: This property is deprecated, use KmsKeyId instead. HomeEfsFileSystemKmsKeyId *string `deprecated:"true" type:"string"` - // SageMaker uses AWS KMS to encrypt the EFS volume attached to the domain with - // an AWS managed customer master key (CMK) by default. For more control, specify - // a customer managed CMK. + // SageMaker uses Amazon Web Services KMS to encrypt the EFS volume attached + // to the domain with an Amazon Web Services managed customer master key (CMK) + // by default. For more control, specify a customer managed CMK. KmsKeyId *string `type:"string"` // The VPC subnets that Studio uses for communication. 
@@ -29276,9 +29328,9 @@ type CreateEndpointConfigInput struct { // EndpointConfigName is a required field EndpointConfigName *string `type:"string" required:"true"` - // The Amazon Resource Name (ARN) of a AWS Key Management Service key that Amazon - // SageMaker uses to encrypt data on the storage volume attached to the ML compute - // instance that hosts the endpoint. + // The Amazon Resource Name (ARN) of a Amazon Web Services Key Management Service + // key that Amazon SageMaker uses to encrypt data on the storage volume attached + // to the ML compute instance that hosts the endpoint. // // The KmsKeyId can be any of the following formats: // @@ -29292,7 +29344,8 @@ type CreateEndpointConfigInput struct { // // The KMS key policy must grant permission to the IAM role that you specify // in your CreateEndpoint, UpdateEndpoint requests. For more information, refer - // to the AWS Key Management Service section Using Key Policies in AWS KMS (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html) + // to the Amazon Web Services Key Management Service section Using Key Policies + // in Amazon Web Services KMS (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html) // // Certain Nitro-based instances include local storage, dependent on the instance // type. Local storage volumes are encrypted using a hardware module on the @@ -29316,9 +29369,10 @@ type CreateEndpointConfigInput struct { // ProductionVariants is a required field ProductionVariants []*ProductionVariant `min:"1" type:"list" required:"true"` - // An array of key-value pairs. You can use tags to categorize your AWS resources - // in different ways, for example, by purpose, owner, or environment. For more - // information, see Tagging AWS Resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). + // An array of key-value pairs. 
You can use tags to categorize your Amazon Web + // Services resources in different ways, for example, by purpose, owner, or + // environment. For more information, see Tagging Amazon Web Services Resources + // (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). Tags []*Tag `type:"list"` } @@ -29439,16 +29493,17 @@ type CreateEndpointInput struct { // EndpointConfigName is a required field EndpointConfigName *string `type:"string" required:"true"` - // The name of the endpoint.The name must be unique within an AWS Region in - // your AWS account. The name is case-insensitive in CreateEndpoint, but the - // case is preserved and must be matched in . + // The name of the endpoint.The name must be unique within an Amazon Web Services + // Region in your Amazon Web Services account. The name is case-insensitive + // in CreateEndpoint, but the case is preserved and must be matched in . // // EndpointName is a required field EndpointName *string `type:"string" required:"true"` - // An array of key-value pairs. You can use tags to categorize your AWS resources - // in different ways, for example, by purpose, owner, or environment. For more - // information, see Tagging AWS Resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). + // An array of key-value pairs. You can use tags to categorize your Amazon Web + // Services resources in different ways, for example, by purpose, owner, or + // environment. For more information, see Tagging Amazon Web Services Resources + // (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). Tags []*Tag `type:"list"` } @@ -29541,8 +29596,8 @@ type CreateExperimentInput struct { // If you don't specify DisplayName, the value in ExperimentName is displayed. DisplayName *string `min:"1" type:"string"` - // The name of the experiment. The name must be unique in your AWS account and - // is not case-sensitive. + // The name of the experiment. 
The name must be unique in your Amazon Web Services + // account and is not case-sensitive. // // ExperimentName is a required field ExperimentName *string `min:"1" type:"string" required:"true"` @@ -29674,8 +29729,8 @@ type CreateFeatureGroupInput struct { // FeatureDefinitions is a required field FeatureDefinitions []*FeatureDefinition `min:"1" type:"list" required:"true"` - // The name of the FeatureGroup. The name must be unique within an AWS Region - // in an AWS account. The name: + // The name of the FeatureGroup. The name must be unique within an Amazon Web + // Services Region in an Amazon Web Services account. The name: // // * Must start and end with an alphanumeric character. // @@ -29690,7 +29745,8 @@ type CreateFeatureGroupInput struct { // // * The Amazon Simple Storage Service (Amazon S3) location of an OfflineStore. // - // * A configuration for an AWS Glue or AWS Hive data cataolgue. + // * A configuration for an Amazon Web Services Glue or Amazon Web Services + // Hive data cataolgue. // // * An KMS encryption key to encrypt the Amazon S3 location used for OfflineStore. // @@ -29700,8 +29756,8 @@ type CreateFeatureGroupInput struct { // You can turn the OnlineStore on or off by specifying True for the EnableOnlineStore // flag in OnlineStoreConfig; the default value is False. // - // You can also include an AWS KMS key ID (KMSKeyId) for at-rest encryption - // of the OnlineStore. + // You can also include an Amazon Web Services KMS key ID (KMSKeyId) for at-rest + // encryption of the OnlineStore. OnlineStoreConfig *OnlineStoreConfig `type:"structure"` // The name of the Feature whose value uniquely identifies a Record defined @@ -30175,16 +30231,17 @@ type CreateHyperParameterTuningJobInput struct { // The name of the tuning job. This name is the prefix for the names of all // training jobs that this tuning job launches. The name must be unique within - // the same AWS account and AWS Region. The name must have 1 to 32 characters. 
- // Valid characters are a-z, A-Z, 0-9, and : + = @ _ % - (hyphen). The name - // is not case sensitive. + // the same Amazon Web Services account and Amazon Web Services Region. The + // name must have 1 to 32 characters. Valid characters are a-z, A-Z, 0-9, and + // : + = @ _ % - (hyphen). The name is not case sensitive. // // HyperParameterTuningJobName is a required field HyperParameterTuningJobName *string `min:"1" type:"string" required:"true"` - // An array of key-value pairs. You can use tags to categorize your AWS resources - // in different ways, for example, by purpose, owner, or environment. For more - // information, see Tagging AWS Resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). + // An array of key-value pairs. You can use tags to categorize your Amazon Web + // Services resources in different ways, for example, by purpose, owner, or + // environment. For more information, see Tagging Amazon Web Services Resources + // (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). // // Tags that you specify for the tuning job are also added to all training jobs // that the tuning job launches. @@ -30485,8 +30542,9 @@ type CreateImageVersionInput struct { // BaseImage is a required field BaseImage *string `min:"1" type:"string" required:"true"` - // A unique ID. If not specified, the AWS CLI and AWS SDKs, such as the SDK - // for Python (Boto3), add a unique value to the call. + // A unique ID. If not specified, the Amazon Web Services CLI and Amazon Web + // Services SDKs, such as the SDK for Python (Boto3), add a unique value to + // the call. ClientToken *string `min:"1" type:"string" idempotencyToken:"true"` // The ImageName of the Image to create a version of. @@ -30673,15 +30731,16 @@ type CreateLabelingJobInput struct { LabelingJobAlgorithmsConfig *LabelingJobAlgorithmsConfig `type:"structure"` // The name of the labeling job. This name is used to identify the job in a - // list of labeling jobs. 
Labeling job names must be unique within an AWS account - // and region. LabelingJobName is not case sensitive. For example, Example-job - // and example-job are considered the same labeling job name by Ground Truth. + // list of labeling jobs. Labeling job names must be unique within an Amazon + // Web Services account and region. LabelingJobName is not case sensitive. For + // example, Example-job and example-job are considered the same labeling job + // name by Ground Truth. // // LabelingJobName is a required field LabelingJobName *string `min:"1" type:"string" required:"true"` - // The location of the output data and the AWS Key Management Service key ID - // for the key used to encrypt the output data, if any. + // The location of the output data and the Amazon Web Services Key Management + // Service key ID for the key used to encrypt the output data, if any. // // OutputConfig is a required field OutputConfig *LabelingJobOutputConfig `type:"structure" required:"true"` @@ -30700,7 +30759,7 @@ type CreateLabelingJobInput struct { // An array of key/value pairs. For more information, see Using Cost Allocation // Tags (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html#allocation-what) - // in the AWS Billing and Cost Management User Guide. + // in the Amazon Web Services Billing and Cost Management User Guide. Tags []*Tag `type:"list"` } @@ -30875,8 +30934,8 @@ func (s *CreateLabelingJobOutput) SetLabelingJobArn(v string) *CreateLabelingJob type CreateModelBiasJobDefinitionInput struct { _ struct{} `type:"structure"` - // The name of the bias job definition. The name must be unique within an AWS - // Region in the AWS account. + // The name of the bias job definition. The name must be unique within an Amazon + // Web Services Region in the Amazon Web Services account. 
// // JobDefinitionName is a required field JobDefinitionName *string `min:"1" type:"string" required:"true"` @@ -30918,7 +30977,7 @@ type CreateModelBiasJobDefinitionInput struct { // (Optional) An array of key-value pairs. For more information, see Using Cost // Allocation Tags (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html#allocation-whatURL) - // in the AWS Billing and Cost Management User Guide. + // in the Amazon Web Services Billing and Cost Management User Guide. Tags []*Tag `type:"list"` } @@ -31100,7 +31159,7 @@ type CreateModelExplainabilityJobDefinitionInput struct { _ struct{} `type:"structure"` // The name of the model explainability job definition. The name must be unique - // within an AWS Region in the AWS account. + // within an Amazon Web Services Region in the Amazon Web Services account. // // JobDefinitionName is a required field JobDefinitionName *string `min:"1" type:"string" required:"true"` @@ -31143,7 +31202,7 @@ type CreateModelExplainabilityJobDefinitionInput struct { // (Optional) An array of key-value pairs. For more information, see Using Cost // Allocation Tags (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html#allocation-whatURL) - // in the AWS Billing and Cost Management User Guide. + // in the Amazon Web Services Billing and Cost Management User Guide. Tags []*Tag `type:"list"` } @@ -31356,9 +31415,10 @@ type CreateModelInput struct { // model is deployed for predictions. PrimaryContainer *ContainerDefinition `type:"structure"` - // An array of key-value pairs. You can use tags to categorize your AWS resources - // in different ways, for example, by purpose, owner, or environment. For more - // information, see Tagging AWS Resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). + // An array of key-value pairs. 
You can use tags to categorize your Amazon Web + // Services resources in different ways, for example, by purpose, owner, or + // environment. For more information, see Tagging Amazon Web Services Resources + // (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). Tags []*Tag `type:"list"` // A VpcConfig object that specifies the VPC that you want your model to connect @@ -31519,8 +31579,8 @@ type CreateModelPackageGroupInput struct { ModelPackageGroupName *string `min:"1" type:"string" required:"true"` // A list of key value pairs associated with the model group. For more information, - // see Tagging AWS resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) - // in the AWS General Reference Guide. + // see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) + // in the Amazon Web Services General Reference Guide. Tags []*Tag `type:"list"` } @@ -31606,7 +31666,7 @@ func (s *CreateModelPackageGroupOutput) SetModelPackageGroupArn(v string) *Creat type CreateModelPackageInput struct { _ struct{} `type:"structure"` - // Whether to certify the model package for listing on AWS Marketplace. + // Whether to certify the model package for listing on Amazon Web Services Marketplace. // // This parameter is optional for unversioned models, and does not apply to // versioned models. @@ -31663,8 +31723,8 @@ type CreateModelPackageInput struct { SourceAlgorithmSpecification *SourceAlgorithmSpecification `type:"structure"` // A list of key value pairs associated with the model. For more information, - // see Tagging AWS resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) - // in the AWS General Reference Guide. + // see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) + // in the Amazon Web Services General Reference Guide. 
Tags []*Tag `type:"list"` // Specifies configurations for one or more transform jobs that Amazon SageMaker @@ -31873,7 +31933,7 @@ type CreateModelQualityJobDefinitionInput struct { // (Optional) An array of key-value pairs. For more information, see Using Cost // Allocation Tags (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html#allocation-whatURL) - // in the AWS Billing and Cost Management User Guide. + // in the Amazon Web Services Billing and Cost Management User Guide. Tags []*Tag `type:"list"` } @@ -32060,15 +32120,15 @@ type CreateMonitoringScheduleInput struct { // MonitoringScheduleConfig is a required field MonitoringScheduleConfig *MonitoringScheduleConfig `type:"structure" required:"true"` - // The name of the monitoring schedule. The name must be unique within an AWS - // Region within an AWS account. + // The name of the monitoring schedule. The name must be unique within an Amazon + // Web Services Region within an Amazon Web Services account. // // MonitoringScheduleName is a required field MonitoringScheduleName *string `min:"1" type:"string" required:"true"` // (Optional) An array of key-value pairs. For more information, see Using Cost // Allocation Tags (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html#allocation-whatURL) - // in the AWS Billing and Cost Management User Guide. + // in the Amazon Web Services Billing and Cost Management User Guide. Tags []*Tag `type:"list"` } @@ -32170,17 +32230,19 @@ type CreateNotebookInstanceInput struct { // An array of up to three Git repositories to associate with the notebook instance. // These can be either the names of Git repositories stored as resources in - // your account, or the URL of Git repositories in AWS CodeCommit (https://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html) - // or in any other Git repository. These repositories are cloned at the same - // level as the default repository of your notebook instance. 
For more information, + // your account, or the URL of Git repositories in Amazon Web Services CodeCommit + // (https://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html) or + // in any other Git repository. These repositories are cloned at the same level + // as the default repository of your notebook instance. For more information, // see Associating Git Repositories with Amazon SageMaker Notebook Instances // (https://docs.aws.amazon.com/sagemaker/latest/dg/nbi-git-repo.html). AdditionalCodeRepositories []*string `type:"list"` // A Git repository to associate with the notebook instance as its default code // repository. This can be either the name of a Git repository stored as a resource - // in your account, or the URL of a Git repository in AWS CodeCommit (https://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html) - // or in any other Git repository. When you open a notebook instance, it opens + // in your account, or the URL of a Git repository in Amazon Web Services CodeCommit + // (https://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html) or + // in any other Git repository. When you open a notebook instance, it opens // in the directory that contains this repository. For more information, see // Associating Git Repositories with Amazon SageMaker Notebook Instances (https://docs.aws.amazon.com/sagemaker/latest/dg/nbi-git-repo.html). DefaultCodeRepository *string `min:"1" type:"string"` @@ -32201,11 +32263,11 @@ type CreateNotebookInstanceInput struct { // InstanceType is a required field InstanceType *string `type:"string" required:"true" enum:"InstanceType"` - // The Amazon Resource Name (ARN) of a AWS Key Management Service key that Amazon - // SageMaker uses to encrypt data on the storage volume attached to your notebook - // instance. The KMS key you provide must be enabled. 
For information, see Enabling - // and Disabling Keys (https://docs.aws.amazon.com/kms/latest/developerguide/enabling-keys.html) - // in the AWS Key Management Service Developer Guide. + // The Amazon Resource Name (ARN) of a Amazon Web Services Key Management Service + // key that Amazon SageMaker uses to encrypt data on the storage volume attached + // to your notebook instance. The KMS key you provide must be enabled. For information, + // see Enabling and Disabling Keys (https://docs.aws.amazon.com/kms/latest/developerguide/enabling-keys.html) + // in the Amazon Web Services Key Management Service Developer Guide. KmsKeyId *string `type:"string"` // The name of a lifecycle configuration to associate with the notebook instance. @@ -32218,12 +32280,12 @@ type CreateNotebookInstanceInput struct { // NotebookInstanceName is a required field NotebookInstanceName *string `type:"string" required:"true"` - // When you send any requests to AWS resources from the notebook instance, Amazon - // SageMaker assumes this role to perform tasks on your behalf. You must grant - // this role necessary permissions so Amazon SageMaker can perform these tasks. - // The policy must allow the Amazon SageMaker service principal (sagemaker.amazonaws.com) - // permissions to assume this role. For more information, see Amazon SageMaker - // Roles (https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-roles.html). + // When you send any requests to Amazon Web Services resources from the notebook + // instance, Amazon SageMaker assumes this role to perform tasks on your behalf. + // You must grant this role necessary permissions so Amazon SageMaker can perform + // these tasks. The policy must allow the Amazon SageMaker service principal + // (sagemaker.amazonaws.com) permissions to assume this role. For more information, + // see Amazon SageMaker Roles (https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-roles.html). 
// // To be able to pass this role to Amazon SageMaker, the caller of this API // must have the iam:PassRole permission. @@ -32248,9 +32310,10 @@ type CreateNotebookInstanceInput struct { // from your ML compute instance. SubnetId *string `type:"string"` - // An array of key-value pairs. You can use tags to categorize your AWS resources - // in different ways, for example, by purpose, owner, or environment. For more - // information, see Tagging AWS Resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). + // An array of key-value pairs. You can use tags to categorize your Amazon Web + // Services resources in different ways, for example, by purpose, owner, or + // environment. For more information, see Tagging Amazon Web Services Resources + // (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). Tags []*Tag `type:"list"` // The size, in GB, of the ML storage volume to attach to the notebook instance. @@ -32870,8 +32933,8 @@ type CreateProcessingJobInput struct { // An array of inputs configuring the data to download into the processing container. ProcessingInputs []*ProcessingInput `type:"list"` - // The name of the processing job. The name must be unique within an AWS Region - // in the AWS account. + // The name of the processing job. The name must be unique within an Amazon + // Web Services Region in the Amazon Web Services account. // // ProcessingJobName is a required field ProcessingJobName *string `min:"1" type:"string" required:"true"` @@ -32897,7 +32960,7 @@ type CreateProcessingJobInput struct { // (Optional) An array of key-value pairs. For more information, see Using Cost // Allocation Tags (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html#allocation-whatURL) - // in the AWS Billing and Cost Management User Guide. + // in the Amazon Web Services Billing and Cost Management User Guide. 
Tags []*Tag `type:"list"` } @@ -33092,14 +33155,15 @@ type CreateProjectInput struct { ProjectName *string `min:"1" type:"string" required:"true"` // The product ID and provisioning artifact ID to provision a service catalog. - // For information, see What is AWS Service Catalog (https://docs.aws.amazon.com/servicecatalog/latest/adminguide/introduction.html). + // For information, see What is Amazon Web Services Service Catalog (https://docs.aws.amazon.com/servicecatalog/latest/adminguide/introduction.html). // // ServiceCatalogProvisioningDetails is a required field ServiceCatalogProvisioningDetails *ServiceCatalogProvisioningDetails `type:"structure" required:"true"` // An array of key-value pairs that you want to use to organize and track your - // AWS resource costs. For more information, see Tagging AWS resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) - // in the AWS General Reference Guide. + // Amazon Web Services resource costs. For more information, see Tagging Amazon + // Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) + // in the Amazon Web Services General Reference Guide. Tags []*Tag `type:"list"` } @@ -33358,16 +33422,17 @@ type CreateTrainingJobInput struct { // StoppingCondition is a required field StoppingCondition *StoppingCondition `type:"structure" required:"true"` - // An array of key-value pairs. You can use tags to categorize your AWS resources - // in different ways, for example, by purpose, owner, or environment. For more - // information, see Tagging AWS Resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). + // An array of key-value pairs. You can use tags to categorize your Amazon Web + // Services resources in different ways, for example, by purpose, owner, or + // environment. For more information, see Tagging Amazon Web Services Resources + // (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). 
Tags []*Tag `type:"list"` // Configuration of storage locations for the Debugger TensorBoard output data. TensorBoardOutputConfig *TensorBoardOutputConfig `type:"structure"` - // The name of the training job. The name must be unique within an AWS Region - // in an AWS account. + // The name of the training job. The name must be unique within an Amazon Web + // Services Region in an Amazon Web Services account. // // TrainingJobName is a required field TrainingJobName *string `min:"1" type:"string" required:"true"` @@ -33745,15 +33810,15 @@ type CreateTransformJobInput struct { ModelClientConfig *ModelClientConfig `type:"structure"` // The name of the model that you want to use for the transform job. ModelName - // must be the name of an existing Amazon SageMaker model within an AWS Region - // in an AWS account. + // must be the name of an existing Amazon SageMaker model within an Amazon Web + // Services Region in an Amazon Web Services account. // // ModelName is a required field ModelName *string `type:"string" required:"true"` // (Optional) An array of key-value pairs. For more information, see Using Cost // Allocation Tags (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html#allocation-what) - // in the AWS Billing and Cost Management User Guide. + // in the Amazon Web Services Billing and Cost Management User Guide. Tags []*Tag `type:"list"` // Describes the input source and the way the transform job consumes it. @@ -33761,8 +33826,8 @@ type CreateTransformJobInput struct { // TransformInput is a required field TransformInput *TransformInput `type:"structure" required:"true"` - // The name of the transform job. The name must be unique within an AWS Region - // in an AWS account. + // The name of the transform job. The name must be unique within an Amazon Web + // Services Region in an Amazon Web Services account. 
// // TransformJobName is a required field TransformJobName *string `min:"1" type:"string" required:"true"` @@ -33995,8 +34060,8 @@ type CreateTrialComponentInput struct { // search on the tags. Tags []*Tag `type:"list"` - // The name of the component. The name must be unique in your AWS account and - // is not case-sensitive. + // The name of the component. The name must be unique in your Amazon Web Services + // account and is not case-sensitive. // // TrialComponentName is a required field TrialComponentName *string `min:"1" type:"string" required:"true"` @@ -34163,8 +34228,8 @@ type CreateTrialInput struct { // on the tags. Tags []*Tag `type:"list"` - // The name of the trial. The name must be unique in your AWS account and is - // not case-sensitive. + // The name of the trial. The name must be unique in your Amazon Web Services + // account and is not case-sensitive. // // TrialName is a required field TrialName *string `min:"1" type:"string" required:"true"` @@ -34282,10 +34347,10 @@ type CreateUserProfileInput struct { // be specified. SingleSignOnUserIdentifier *string `type:"string"` - // The username of the associated AWS Single Sign-On User for this UserProfile. - // If the Domain's AuthMode is SSO, this field is required, and must match a - // valid username of a user in your directory. If the Domain's AuthMode is not - // SSO, this field cannot be specified. + // The username of the associated Amazon Web Services Single Sign-On User for + // this UserProfile. If the Domain's AuthMode is SSO, this field is required, + // and must match a valid username of a user in your directory. If the Domain's + // AuthMode is not SSO, this field cannot be specified. SingleSignOnUserValue *string `type:"string"` // Each tag consists of a key and an optional value. 
Tag keys must be unique @@ -34580,7 +34645,7 @@ type CreateWorkteamInput struct { // // For more information, see Resource Tag (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-resource-tags.html) // and Using Cost Allocation Tags (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html#allocation-what) - // in the AWS Billing and Cost Management User Guide. + // in the Amazon Web Services Billing and Cost Management User Guide. Tags []*Tag `type:"list"` // The name of the workforce. @@ -36567,7 +36632,7 @@ type DeleteFeatureGroupInput struct { _ struct{} `type:"structure"` // The name of the FeatureGroup you want to delete. The name must be unique - // within an AWS Region in an AWS account. + // within an Amazon Web Services Region in an Amazon Web Services account. // // FeatureGroupName is a required field FeatureGroupName *string `min:"1" type:"string" required:"true"` @@ -38324,7 +38389,7 @@ type DescribeAlgorithmOutput struct { // AlgorithmStatusDetails is a required field AlgorithmStatusDetails *AlgorithmStatusDetails `type:"structure" required:"true"` - // Whether the algorithm is certified to be listed in AWS Marketplace. + // Whether the algorithm is certified to be listed in Amazon Web Services Marketplace. CertifyForMarketplace *bool `type:"boolean"` // A timestamp specifying when the algorithm was created. @@ -38983,9 +39048,9 @@ type DescribeAutoMLJobOutput struct { // them, the values used are the ones you provide. ResolvedAttributes *ResolvedAttributes `type:"structure"` - // The Amazon Resource Name (ARN) of the AWS Identity and Access Management - // (IAM) role that has read permission to the input data location and write - // permission to the output data location in Amazon S3. 
+ // The Amazon Resource Name (ARN) of the Amazon Web Services Identity and Access + // Management (IAM) role that has read permission to the input data location + // and write permission to the output data location in Amazon S3. // // RoleArn is a required field RoleArn *string `min:"20" type:"string" required:"true"` @@ -39188,8 +39253,8 @@ type DescribeCodeRepositoryOutput struct { // Configuration details about the repository, including the URL where the repository // is located, the default branch, and the Amazon Resource Name (ARN) of the - // AWS Secrets Manager secret that contains the credentials used to access the - // repository. + // Amazon Web Services Secrets Manager secret that contains the credentials + // used to access the repository. GitConfig *GitConfig `type:"structure"` // The date and time that the repository was last changed. @@ -39322,6 +39387,8 @@ type DescribeCompilationJobOutput struct { // FailureReason is a required field FailureReason *string `type:"string" required:"true"` + // The inference image to use when compiling a model. Specify an image only + // if the target device is a cloud instance. InferenceImage *string `type:"string"` // Information about the location in Amazon S3 of the input model artifacts, @@ -39859,7 +39926,8 @@ type DescribeDeviceFleetOutput struct { // DeviceFleetName is a required field DeviceFleetName *string `min:"1" type:"string" required:"true"` - // The Amazon Resource Name (ARN) alias created in AWS Internet of Things (IoT). + // The Amazon Resource Name (ARN) alias created in Amazon Web Services Internet + // of Things (IoT). IotRoleAlias *string `type:"string"` // Timestamp of when the device fleet was last updated. @@ -39872,8 +39940,8 @@ type DescribeDeviceFleetOutput struct { // OutputConfig is a required field OutputConfig *EdgeOutputConfig `type:"structure" required:"true"` - // The Amazon Resource Name (ARN) that has access to AWS Internet of Things - // (IoT). 
+ // The Amazon Resource Name (ARN) that has access to Amazon Web Services Internet + // of Things (IoT). RoleArn *string `min:"20" type:"string"` } @@ -40021,7 +40089,8 @@ type DescribeDeviceOutput struct { // DeviceName is a required field DeviceName *string `min:"1" type:"string" required:"true"` - // The AWS Internet of Things (IoT) object thing name associated with the device. + // The Amazon Web Services Internet of Things (IoT) object thing name associated + // with the device. IotThingName *string `type:"string"` // The last heartbeat received from the device. @@ -40192,8 +40261,8 @@ type DescribeDomainOutput struct { // Deprecated: This property is deprecated, use KmsKeyId instead. HomeEfsFileSystemKmsKeyId *string `deprecated:"true" type:"string"` - // The AWS KMS customer managed CMK used to encrypt the EFS volume attached - // to the domain. + // The Amazon Web Services KMS customer managed CMK used to encrypt the EFS + // volume attached to the domain. KmsKeyId *string `type:"string"` // The last modified time. @@ -40583,8 +40652,8 @@ type DescribeEndpointConfigOutput struct { // EndpointConfigName is a required field EndpointConfigName *string `type:"string" required:"true"` - // AWS KMS key ID Amazon SageMaker uses to encrypt data when storing it on the - // ML storage volume attached to the instance. + // Amazon Web Services KMS key ID Amazon SageMaker uses to encrypt data when + // storing it on the ML storage volume attached to the instance. KmsKeyId *string `type:"string"` // An array of ProductionVariant objects, one for each model that you want to @@ -41062,7 +41131,8 @@ type DescribeFeatureGroupOutput struct { NextToken *string `type:"string" required:"true"` // The configuration of the OfflineStore, inducing the S3 location of the OfflineStore, - // AWS Glue or AWS Hive data catalogue configurations, and the security configuration. 
+ // Amazon Web Services Glue or Amazon Web Services Hive data catalogue configurations, + // and the security configuration. OfflineStoreConfig *OfflineStoreConfig `type:"structure"` // The status of the OfflineStore. Notifies you if replicating data into the @@ -41262,8 +41332,8 @@ type DescribeFlowDefinitionOutput struct { // OutputConfig is a required field OutputConfig *FlowDefinitionOutputConfig `type:"structure" required:"true"` - // The Amazon Resource Name (ARN) of the AWS Identity and Access Management - // (IAM) execution role for the flow definition. + // The Amazon Resource Name (ARN) of the Amazon Web Services Identity and Access + // Management (IAM) execution role for the flow definition. // // RoleArn is a required field RoleArn *string `min:"20" type:"string" required:"true"` @@ -42096,8 +42166,8 @@ type DescribeLabelingJobOutput struct { // LastModifiedTime is a required field LastModifiedTime *time.Time `type:"timestamp" required:"true"` - // The location of the job's output data and the AWS Key Management Service - // key ID for the key used to encrypt the output data, if any. + // The location of the job's output data and the Amazon Web Services Key Management + // Service key ID for the key used to encrypt the output data, if any. // // OutputConfig is a required field OutputConfig *LabelingJobOutputConfig `type:"structure" required:"true"` @@ -42112,9 +42182,10 @@ type DescribeLabelingJobOutput struct { // are met, the job is automatically stopped. StoppingConditions *LabelingJobStoppingConditions `type:"structure"` - // An array of key-value pairs. You can use tags to categorize your AWS resources - // in different ways, for example, by purpose, owner, or environment. For more - // information, see Tagging AWS Resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). + // An array of key-value pairs. 
You can use tags to categorize your Amazon Web + // Services resources in different ways, for example, by purpose, owner, or + // environment. For more information, see Tagging Amazon Web Services Resources + // (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). Tags []*Tag `type:"list"` } @@ -42240,7 +42311,7 @@ type DescribeModelBiasJobDefinitionInput struct { _ struct{} `type:"structure"` // The name of the model bias job definition. The name must be unique within - // an AWS Region in the AWS account. + // an Amazon Web Services Region in the Amazon Web Services account. // // JobDefinitionName is a required field JobDefinitionName *string `min:"1" type:"string" required:"true"` @@ -42291,8 +42362,8 @@ type DescribeModelBiasJobDefinitionOutput struct { // JobDefinitionArn is a required field JobDefinitionArn *string `type:"string" required:"true"` - // The name of the bias job definition. The name must be unique within an AWS - // Region in the AWS account. + // The name of the bias job definition. The name must be unique within an Amazon + // Web Services Region in the Amazon Web Services account. // // JobDefinitionName is a required field JobDefinitionName *string `min:"1" type:"string" required:"true"` @@ -42323,9 +42394,9 @@ type DescribeModelBiasJobDefinitionOutput struct { // Networking options for a model bias job. NetworkConfig *MonitoringNetworkConfig `type:"structure"` - // The Amazon Resource Name (ARN) of the AWS Identity and Access Management - // (IAM) role that has read permission to the input data location and write - // permission to the output data location in Amazon S3. + // The Amazon Resource Name (ARN) of the Amazon Web Services Identity and Access + // Management (IAM) role that has read permission to the input data location + // and write permission to the output data location in Amazon S3. 
// // RoleArn is a required field RoleArn *string `min:"20" type:"string" required:"true"` @@ -42414,7 +42485,7 @@ type DescribeModelExplainabilityJobDefinitionInput struct { _ struct{} `type:"structure"` // The name of the model explainability job definition. The name must be unique - // within an AWS Region in the AWS account. + // within an Amazon Web Services Region in the Amazon Web Services account. // // JobDefinitionName is a required field JobDefinitionName *string `min:"1" type:"string" required:"true"` @@ -42466,7 +42537,7 @@ type DescribeModelExplainabilityJobDefinitionOutput struct { JobDefinitionArn *string `type:"string" required:"true"` // The name of the explainability job definition. The name must be unique within - // an AWS Region in the AWS account. + // an Amazon Web Services Region in the Amazon Web Services account. // // JobDefinitionName is a required field JobDefinitionName *string `min:"1" type:"string" required:"true"` @@ -42498,9 +42569,9 @@ type DescribeModelExplainabilityJobDefinitionOutput struct { // Networking options for a model explainability job. NetworkConfig *MonitoringNetworkConfig `type:"structure"` - // The Amazon Resource Name (ARN) of the AWS Identity and Access Management - // (IAM) role that has read permission to the input data location and write - // permission to the output data location in Amazon S3. + // The Amazon Resource Name (ARN) of the Amazon Web Services Identity and Access + // Management (IAM) role that has read permission to the input data location + // and write permission to the output data location in Amazon S3. // // RoleArn is a required field RoleArn *string `min:"20" type:"string" required:"true"` @@ -42901,7 +42972,8 @@ type DescribeModelPackageOutput struct { // A description provided for the model approval. ApprovalDescription *string `type:"string"` - // Whether the model package is certified for listing on AWS Marketplace. 
+ // Whether the model package is certified for listing on Amazon Web Services + // Marketplace. CertifyForMarketplace *bool `type:"boolean"` // Information about the user who created or modified an experiment, trial, @@ -43098,8 +43170,8 @@ func (s *DescribeModelPackageOutput) SetValidationSpecification(v *ModelPackageV type DescribeModelQualityJobDefinitionInput struct { _ struct{} `type:"structure"` - // The name of the model quality job. The name must be unique within an AWS - // Region in the AWS account. + // The name of the model quality job. The name must be unique within an Amazon + // Web Services Region in the Amazon Web Services account. // // JobDefinitionName is a required field JobDefinitionName *string `min:"1" type:"string" required:"true"` @@ -43151,7 +43223,7 @@ type DescribeModelQualityJobDefinitionOutput struct { JobDefinitionArn *string `type:"string" required:"true"` // The name of the quality job definition. The name must be unique within an - // AWS Region in the AWS account. + // Amazon Web Services Region in the Amazon Web Services account. // // JobDefinitionName is a required field JobDefinitionName *string `min:"1" type:"string" required:"true"` @@ -43593,9 +43665,10 @@ type DescribeNotebookInstanceOutput struct { // An array of up to three Git repositories associated with the notebook instance. // These can be either the names of Git repositories stored as resources in - // your account, or the URL of Git repositories in AWS CodeCommit (https://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html) - // or in any other Git repository. These repositories are cloned at the same - // level as the default repository of your notebook instance. For more information, + // your account, or the URL of Git repositories in Amazon Web Services CodeCommit + // (https://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html) or + // in any other Git repository. 
These repositories are cloned at the same level + // as the default repository of your notebook instance. For more information, // see Associating Git Repositories with Amazon SageMaker Notebook Instances // (https://docs.aws.amazon.com/sagemaker/latest/dg/nbi-git-repo.html). AdditionalCodeRepositories []*string `type:"list"` @@ -43606,8 +43679,9 @@ type DescribeNotebookInstanceOutput struct { // The Git repository associated with the notebook instance as its default code // repository. This can be either the name of a Git repository stored as a resource - // in your account, or the URL of a Git repository in AWS CodeCommit (https://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html) - // or in any other Git repository. When you open a notebook instance, it opens + // in your account, or the URL of a Git repository in Amazon Web Services CodeCommit + // (https://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html) or + // in any other Git repository. When you open a notebook instance, it opens // in the directory that contains this repository. For more information, see // Associating Git Repositories with Amazon SageMaker Notebook Instances (https://docs.aws.amazon.com/sagemaker/latest/dg/nbi-git-repo.html). DefaultCodeRepository *string `min:"1" type:"string"` @@ -43627,8 +43701,8 @@ type DescribeNotebookInstanceOutput struct { // The type of ML compute instance running on the notebook instance. InstanceType *string `type:"string" enum:"InstanceType"` - // The AWS KMS key ID Amazon SageMaker uses to encrypt data when storing it - // on the ML storage volume attached to the instance. + // The Amazon Web Services KMS key ID Amazon SageMaker uses to encrypt data + // when storing it on the ML storage volume attached to the instance. KmsKeyId *string `type:"string"` // A timestamp. 
Use this parameter to retrieve the time when the notebook instance @@ -44200,8 +44274,8 @@ func (s *DescribePipelineOutput) SetRoleArn(v string) *DescribePipelineOutput { type DescribeProcessingJobInput struct { _ struct{} `type:"structure"` - // The name of the processing job. The name must be unique within an AWS Region - // in the AWS account. + // The name of the processing job. The name must be unique within an Amazon + // Web Services Region in the Amazon Web Services account. // // ProcessingJobName is a required field ProcessingJobName *string `min:"1" type:"string" required:"true"` @@ -44290,8 +44364,8 @@ type DescribeProcessingJobOutput struct { // ProcessingJobArn is a required field ProcessingJobArn *string `type:"string" required:"true"` - // The name of the processing job. The name must be unique within an AWS Region - // in the AWS account. + // The name of the processing job. The name must be unique within an Amazon + // Web Services Region in the Amazon Web Services account. // // ProcessingJobName is a required field ProcessingJobName *string `min:"1" type:"string" required:"true"` @@ -44541,7 +44615,7 @@ type DescribeProjectOutput struct { ServiceCatalogProvisionedProductDetails *ServiceCatalogProvisionedProductDetails `type:"structure"` // Information used to provision a service catalog product. For information, - // see What is AWS Service Catalog (https://docs.aws.amazon.com/servicecatalog/latest/adminguide/introduction.html). + // see What is Amazon Web Services Service Catalog (https://docs.aws.amazon.com/servicecatalog/latest/adminguide/introduction.html). // // ServiceCatalogProvisioningDetails is a required field ServiceCatalogProvisioningDetails *ServiceCatalogProvisioningDetails `type:"structure" required:"true"` @@ -44846,8 +44920,8 @@ type DescribeTrainingJobOutput struct { // The number of times to retry the job when the job fails due to an InternalServerError. 
RetryStrategy *RetryStrategy `type:"structure"` - // The AWS Identity and Access Management (IAM) role configured for the training - // job. + // The Amazon Web Services Identity and Access Management (IAM) role configured + // for the training job. RoleArn *string `min:"20" type:"string"` // Provides detailed information about the state of the training job. For detailed @@ -46081,9 +46155,10 @@ type DescribeWorkforceOutput struct { // A single private workforce, which is automatically created when you create // your first private work team. You can create one private work force in each - // AWS Region. By default, any workforce-related API operation used in a specific - // region will apply to the workforce created in that region. To learn how to - // create a private workforce, see Create a Private Workforce (https://docs.aws.amazon.com/sagemaker/latest/dg/sms-workforce-create-private.html). + // Amazon Web Services Region. By default, any workforce-related API operation + // used in a specific region will apply to the workforce created in that region. + // To learn how to create a private workforce, see Create a Private Workforce + // (https://docs.aws.amazon.com/sagemaker/latest/dg/sms-workforce-create-private.html). // // Workforce is a required field Workforce *Workforce `type:"structure" required:"true"` @@ -46243,7 +46318,7 @@ type Device struct { // DeviceName is a required field DeviceName *string `min:"1" type:"string" required:"true"` - // AWS Internet of Things (IoT) object name. + // Amazon Web Services Internet of Things (IoT) object name. IotThingName *string `type:"string"` } @@ -46406,7 +46481,8 @@ type DeviceSummary struct { // DeviceName is a required field DeviceName *string `min:"1" type:"string" required:"true"` - // The AWS Internet of Things (IoT) object thing name associated with the device.. + // The Amazon Web Services Internet of Things (IoT) object thing name associated + // with the device.. 
IotThingName *string `type:"string"` // The last heartbeat received from the device. @@ -46852,10 +46928,10 @@ func (s *EdgeModelSummary) SetModelVersion(v string) *EdgeModelSummary { type EdgeOutputConfig struct { _ struct{} `type:"structure"` - // The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to - // encrypt data on the storage volume after compilation job. If you don't provide - // a KMS key ID, Amazon SageMaker uses the default KMS key for Amazon S3 for - // your role's account. + // The Amazon Web Services Key Management Service (Amazon Web Services KMS) + // key that Amazon SageMaker uses to encrypt data on the storage volume after + // compilation job. If you don't provide a KMS key ID, Amazon SageMaker uses + // the default KMS key for Amazon S3 for your role's account. KmsKeyId *string `type:"string"` // The configuration used to create deployment artifacts. Specify configuration @@ -46868,11 +46944,11 @@ type EdgeOutputConfig struct { // // * ComponentDescription (optional) - Description of the component. // - // * ComponentVersion (optional) - The version of the component. AWS IoT - // Greengrass uses semantic versions for components. Semantic versions follow - // a major.minor.patch number system. For example, version 1.0.0 represents - // the first major release for a component. For more information, see the - // semantic version specification (https://semver.org/). + // * ComponentVersion (optional) - The version of the component. Amazon Web + // Services IoT Greengrass uses semantic versions for components. Semantic + // versions follow a major.minor.patch number system. For example, version + // 1.0.0 represents the first major release for a component. For more information, + // see the semantic version specification (https://semver.org/). // // * PlatformOS (optional) - The name of the operating system for the platform. // Supported platforms include Windows and Linux. 
@@ -46883,7 +46959,7 @@ type EdgeOutputConfig struct { PresetDeploymentConfig *string `type:"string"` // The deployment type SageMaker Edge Manager will create. Currently only supports - // AWS IoT Greengrass Version 2 components. + // Amazon Web Services IoT Greengrass Version 2 components. PresetDeploymentType *string `type:"string" enum:"EdgePresetDeploymentType"` // The Amazon Simple Storage (S3) bucker URI. @@ -47046,7 +47122,7 @@ type EdgePresetDeploymentOutput struct { StatusMessage *string `type:"string"` // The deployment type created by SageMaker Edge Manager. Currently only supports - // AWS IoT Greengrass Version 2 components. + // Amazon Web Services IoT Greengrass Version 2 components. // // Type is a required field Type *string `type:"string" required:"true" enum:"EdgePresetDeploymentType"` @@ -47162,8 +47238,8 @@ type Endpoint struct { ProductionVariants []*ProductionVariantSummary `min:"1" type:"list"` // A list of the tags associated with the endpoint. For more information, see - // Tagging AWS resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) - // in the AWS General Reference Guide. + // Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) + // in the Amazon Web Services General Reference Guide. Tags []*Tag `type:"list"` } @@ -47949,16 +48025,17 @@ type FeatureGroup struct { // Provide an OfflineStoreConfig in a request to CreateFeatureGroup to create // an OfflineStore. // - // To encrypt an OfflineStore using at rest data encryption, specify AWS Key - // Management Service (KMS) key ID, or KMSKeyId, in S3StorageConfig. + // To encrypt an OfflineStore using at rest data encryption, specify Amazon + // Web Services Key Management Service (KMS) key ID, or KMSKeyId, in S3StorageConfig. OfflineStoreConfig *OfflineStoreConfig `type:"structure"` // The status of OfflineStore. 
OfflineStoreStatus *OfflineStoreStatus `type:"structure"` - // Use this to specify the AWS Key Management Service (KMS) Key ID, or KMSKeyId, - // for at rest data encryption. You can turn OnlineStore on or off by specifying - // the EnableOnlineStore flag at General Assembly; the default value is False. + // Use this to specify the Amazon Web Services Key Management Service (KMS) + // Key ID, or KMSKeyId, for at rest data encryption. You can turn OnlineStore + // on or off by specifying the EnableOnlineStore flag at General Assembly; the + // default value is False. OnlineStoreConfig *OnlineStoreConfig `type:"structure"` // The name of the Feature whose value uniquely identifies a Record defined @@ -48978,7 +49055,8 @@ func (s *GetSearchSuggestionsOutput) SetPropertyNameSuggestions(v []*PropertyNam return s } -// Specifies configuration details for a Git repository in your AWS account. +// Specifies configuration details for a Git repository in your Amazon Web Services +// account. type GitConfig struct { _ struct{} `type:"structure"` @@ -48990,9 +49068,10 @@ type GitConfig struct { // RepositoryUrl is a required field RepositoryUrl *string `type:"string" required:"true"` - // The Amazon Resource Name (ARN) of the AWS Secrets Manager secret that contains - // the credentials used to access the git repository. The secret must have a - // staging label of AWSCURRENT and must be in the following format: + // The Amazon Resource Name (ARN) of the Amazon Web Services Secrets Manager + // secret that contains the credentials used to access the git repository. 
The + // secret must have a staging label of AWSCURRENT and must be in the following + // format: // // {"username": UserName, "password": Password} SecretArn *string `min:"1" type:"string"` @@ -49050,9 +49129,10 @@ func (s *GitConfig) SetSecretArn(v string) *GitConfig { type GitConfigForUpdate struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) of the AWS Secrets Manager secret that contains - // the credentials used to access the git repository. The secret must have a - // staging label of AWSCURRENT and must be in the following format: + // The Amazon Resource Name (ARN) of the Amazon Web Services Secrets Manager + // secret that contains the credentials used to access the git repository. The + // secret must have a staging label of AWSCURRENT and must be in the following + // format: // // {"username": UserName, "password": Password} SecretArn *string `min:"1" type:"string"` @@ -50258,7 +50338,7 @@ type HumanTaskConfig struct { // // * If you choose a private or vendor workforce, the default value is 10 // days (864,000 seconds). For most users, the maximum is also 10 days. If - // you want to change this limit, contact AWS Support. + // you want to change this limit, contact Amazon Web Services Support. TaskAvailabilityLifetimeInSeconds *int64 `min:"60" type:"integer"` // A description of the task for your human workers. @@ -50285,7 +50365,7 @@ type HumanTaskConfig struct { // * For 3D point cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/sms-point-cloud.html) // and video frame (https://docs.aws.amazon.com/sagemaker/latest/dg/sms-video.html) // labeling jobs, the maximum is 7 days (604,800 seconds). If you want to - // change these limits, contact AWS Support. + // change these limits, contact Amazon Web Services Support. 
// // TaskTimeLimitInSeconds is a required field TaskTimeLimitInSeconds *int64 `min:"30" type:"integer" required:"true"` @@ -52803,7 +52883,8 @@ type LabelingJobForWorkteamSummary struct { // The configured number of workers per data object. NumberOfHumanWorkersPerDataObject *int64 `min:"1" type:"integer"` - // The AWS account ID of the account used to start the labeling job. + // The Amazon Web Services account ID of the account used to start the labeling + // job. // // WorkRequesterAccountId is a required field WorkRequesterAccountId *string `type:"string" required:"true"` @@ -52948,15 +53029,16 @@ func (s *LabelingJobOutput) SetOutputDatasetS3Uri(v string) *LabelingJobOutput { type LabelingJobOutputConfig struct { _ struct{} `type:"structure"` - // The AWS Key Management Service ID of the key used to encrypt the output data, - // if any. + // The Amazon Web Services Key Management Service ID of the key used to encrypt + // the output data, if any. // // If you provide your own KMS key ID, you must add the required permissions // to your KMS key described in Encrypt Output Data and Storage Volume with - // AWS KMS (https://docs.aws.amazon.com/sagemaker/latest/dg/sms-security-permission.html#sms-security-kms-permissions). + // Amazon Web Services KMS (https://docs.aws.amazon.com/sagemaker/latest/dg/sms-security-permission.html#sms-security-kms-permissions). // - // If you don't provide a KMS key ID, Amazon SageMaker uses the default AWS - // KMS key for Amazon S3 for your role's account to encrypt your output data. + // If you don't provide a KMS key ID, Amazon SageMaker uses the default Amazon + // Web Services KMS key for Amazon S3 for your role's account to encrypt your + // output data. 
// // If you use a bucket policy with an s3:PutObject permission that only allows // objects with server-side encryption, set the condition key of s3:x-amz-server-side-encryption @@ -53028,16 +53110,17 @@ func (s *LabelingJobOutputConfig) SetSnsTopicArn(v string) *LabelingJobOutputCon type LabelingJobResourceConfig struct { _ struct{} `type:"structure"` - // The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to - // encrypt data on the storage volume attached to the ML compute instance(s) - // that run the training and inference jobs used for automated data labeling. + // The Amazon Web Services Key Management Service (Amazon Web Services KMS) + // key that Amazon SageMaker uses to encrypt data on the storage volume attached + // to the ML compute instance(s) that run the training and inference jobs used + // for automated data labeling. // // You can only specify a VolumeKmsKeyId when you create a labeling job with // automated data labeling enabled using the API operation CreateLabelingJob. - // You cannot specify an AWS KMS customer managed CMK to encrypt the storage - // volume used for automated data labeling model training and inference when - // you create a labeling job using the console. To learn more, see Output Data - // and Storage Volume Encryption (https://docs.aws.amazon.com/sagemaker/latest/dg/sms-security.html). + // You cannot specify an Amazon Web Services KMS customer managed CMK to encrypt + // the storage volume used for automated data labeling model training and inference + // when you create a labeling job using the console. To learn more, see Output + // Data and Storage Volume Encryption (https://docs.aws.amazon.com/sagemaker/latest/dg/sms-security.html). 
// // The VolumeKmsKeyId can be any of the following formats: // @@ -54595,8 +54678,8 @@ type ListCodeRepositoriesOutput struct { // * Last modified time // // * Configuration information, including the URL location of the repository - // and the ARN of the AWS Secrets Manager secret that contains the credentials - // used to access the repository. + // and the ARN of the Amazon Web Services Secrets Manager secret that contains + // the credentials used to access the repository. // // CodeRepositorySummaryList is a required field CodeRepositorySummaryList []*CodeRepositorySummary `type:"list" required:"true"` @@ -57446,7 +57529,7 @@ func (s *ListModelPackageGroupsInput) SetSortOrder(v string) *ListModelPackageGr type ListModelPackageGroupsOutput struct { _ struct{} `type:"structure"` - // A list of summaries of the model groups in your AWS account. + // A list of summaries of the model groups in your Amazon Web Services account. // // ModelPackageGroupSummaryList is a required field ModelPackageGroupSummaryList []*ModelPackageGroupSummary `type:"list" required:"true"` @@ -61592,9 +61675,10 @@ type ModelPackage struct { // A description provided when the model approval is set. ApprovalDescription *string `type:"string"` - // Whether the model package is to be certified to be listed on AWS Marketplace. - // For information about listing model packages on AWS Marketplace, see List - // Your Algorithm or Model Package on AWS Marketplace (https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-mkt-list.html). + // Whether the model package is to be certified to be listed on Amazon Web Services + // Marketplace. For information about listing model packages on Amazon Web Services + // Marketplace, see List Your Algorithm or Model Package on Amazon Web Services + // Marketplace (https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-mkt-list.html). 
CertifyForMarketplace *bool `type:"boolean"` // Information about the user who created or modified an experiment, trial, @@ -61664,8 +61748,8 @@ type ModelPackage struct { SourceAlgorithmSpecification *SourceAlgorithmSpecification `type:"structure"` // A list of the tags associated with the model package. For more information, - // see Tagging AWS resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) - // in the AWS General Reference Guide. + // see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) + // in the Amazon Web Services General Reference Guide. Tags []*Tag `type:"list"` // Specifies batch transform jobs that Amazon SageMaker runs to validate your @@ -61810,6 +61894,11 @@ type ModelPackageContainerDefinition struct { // The DNS host name for the Docker container. ContainerHostname *string `type:"string"` + // The environment variables to set in the Docker container. Each key and value + // in the Environment string to string map can have length of up to 1024. We + // support up to 16 entries in the map. + Environment map[string]*string `type:"map"` + // The Amazon EC2 Container Registry (Amazon ECR) path where inference code // is stored. // @@ -61834,7 +61923,7 @@ type ModelPackageContainerDefinition struct { // the model package. ModelDataUrl *string `type:"string"` - // The AWS Marketplace product ID of the model package. + // The Amazon Web Services Marketplace product ID of the model package. ProductId *string `type:"string"` } @@ -61867,6 +61956,12 @@ func (s *ModelPackageContainerDefinition) SetContainerHostname(v string) *ModelP return s } +// SetEnvironment sets the Environment field's value. +func (s *ModelPackageContainerDefinition) SetEnvironment(v map[string]*string) *ModelPackageContainerDefinition { + s.Environment = v + return s +} + // SetImage sets the Image field's value. 
func (s *ModelPackageContainerDefinition) SetImage(v string) *ModelPackageContainerDefinition { s.Image = &v @@ -61927,8 +62022,8 @@ type ModelPackageGroup struct { ModelPackageGroupStatus *string `type:"string" enum:"ModelPackageGroupStatus"` // A list of the tags associated with the model group. For more information, - // see Tagging AWS resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) - // in the AWS General Reference Guide. + // see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) + // in the Amazon Web Services General Reference Guide. Tags []*Tag `type:"list"` } @@ -62239,7 +62334,7 @@ func (s *ModelPackageSummary) SetModelPackageVersion(v int64) *ModelPackageSumma // in the process of validating the model package. // // The data provided in the validation profile is made available to your buyers -// on AWS Marketplace. +// on Amazon Web Services Marketplace. type ModelPackageValidationProfile struct { _ struct{} `type:"structure"` @@ -62867,9 +62962,9 @@ type MonitoringClusterConfig struct { // InstanceType is a required field InstanceType *string `type:"string" required:"true" enum:"ProcessingInstanceType"` - // The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to - // encrypt data on the storage volume attached to the ML compute instance(s) - // that run the model monitoring job. + // The Amazon Web Services Key Management Service (Amazon Web Services KMS) + // key that Amazon SageMaker uses to encrypt data on the storage volume attached + // to the ML compute instance(s) that run the model monitoring job. VolumeKmsKeyId *string `type:"string"` // The size of the ML storage volume, in gigabytes, that you want to provision. 
@@ -63500,8 +63595,9 @@ func (s *MonitoringOutput) SetS3Output(v *MonitoringS3Output) *MonitoringOutput type MonitoringOutputConfig struct { _ struct{} `type:"structure"` - // The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to - // encrypt the model artifacts at rest using Amazon S3 server-side encryption. + // The Amazon Web Services Key Management Service (Amazon Web Services KMS) + // key that Amazon SageMaker uses to encrypt the model artifacts at rest using + // Amazon S3 server-side encryption. KmsKeyId *string `type:"string"` // Monitoring outputs for monitoring jobs. This is where the output of the periodic @@ -63714,8 +63810,8 @@ type MonitoringSchedule struct { MonitoringType *string `type:"string" enum:"MonitoringType"` // A list of the tags associated with the monitoring schedlue. For more information, - // see Tagging AWS resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) - // in the AWS General Reference Guide. + // see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) + // in the Amazon Web Services General Reference Guide. Tags []*Tag `type:"list"` } @@ -64066,12 +64162,22 @@ func (s *MultiModelConfig) SetModelCacheSetting(v string) *MultiModelConfig { return s } +// The VpcConfig configuration object that specifies the VPC that you want the +// compilation jobs to connect to. For more information on controlling access +// to your Amazon S3 buckets used for compilation job, see Give Amazon SageMaker +// Compilation Jobs Access to Resources in Your Amazon VPC (https://docs.aws.amazon.com/sagemaker/latest/dg/neo-vpc.html). type NeoVpcConfig struct { _ struct{} `type:"structure"` + // The VPC security group IDs. IDs have the form of sg-xxxxxxxx. Specify the + // security groups for the VPC that is specified in the Subnets field. 
+ // // SecurityGroupIds is a required field SecurityGroupIds []*string `min:"1" type:"list" required:"true"` + // The ID of the subnets in the VPC that you want to connect the compilation + // job to for accessing the model in Amazon S3. + // // Subnets is a required field Subnets []*string `min:"1" type:"list" required:"true"` } @@ -64382,9 +64488,10 @@ type NotebookInstanceSummary struct { // An array of up to three Git repositories associated with the notebook instance. // These can be either the names of Git repositories stored as resources in - // your account, or the URL of Git repositories in AWS CodeCommit (https://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html) - // or in any other Git repository. These repositories are cloned at the same - // level as the default repository of your notebook instance. For more information, + // your account, or the URL of Git repositories in Amazon Web Services CodeCommit + // (https://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html) or + // in any other Git repository. These repositories are cloned at the same level + // as the default repository of your notebook instance. For more information, // see Associating Git Repositories with Amazon SageMaker Notebook Instances // (https://docs.aws.amazon.com/sagemaker/latest/dg/nbi-git-repo.html). AdditionalCodeRepositories []*string `type:"list"` @@ -64394,8 +64501,9 @@ type NotebookInstanceSummary struct { // The Git repository associated with the notebook instance as its default code // repository. This can be either the name of a Git repository stored as a resource - // in your account, or the URL of a Git repository in AWS CodeCommit (https://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html) - // or in any other Git repository. 
When you open a notebook instance, it opens + // in your account, or the URL of a Git repository in Amazon Web Services CodeCommit + // (https://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html) or + // in any other Git repository. When you open a notebook instance, it opens // in the directory that contains this repository. For more information, see // Associating Git Repositories with Amazon SageMaker Notebook Instances (https://docs.aws.amazon.com/sagemaker/latest/dg/nbi-git-repo.html). DefaultCodeRepository *string `min:"1" type:"string"` @@ -64581,8 +64689,8 @@ func (s *ObjectiveStatusCounters) SetSucceeded(v int64) *ObjectiveStatusCounters // Provide an OfflineStoreConfig in a request to CreateFeatureGroup to create // an OfflineStore. // -// To encrypt an OfflineStore using at rest data encryption, specify AWS Key -// Management Service (KMS) key ID, or KMSKeyId, in S3StorageConfig. +// To encrypt an OfflineStore using at rest data encryption, specify Amazon +// Web Services Key Management Service (KMS) key ID, or KMSKeyId, in S3StorageConfig. type OfflineStoreConfig struct { _ struct{} `type:"structure"` @@ -64590,8 +64698,8 @@ type OfflineStoreConfig struct { // is created. DataCatalogConfig *DataCatalogConfig `type:"structure"` - // Set to True to disable the automatic creation of an AWS Glue table when configuring - // an OfflineStore. + // Set to True to disable the automatic creation of an Amazon Web Services Glue + // table when configuring an OfflineStore. DisableGlueTableCreation *bool `type:"boolean"` // The Amazon Simple Storage (Amazon S3) location of OfflineStore. @@ -64954,9 +65062,10 @@ func (s *OidcMemberDefinition) SetGroups(v []*string) *OidcMemberDefinition { return s } -// Use this to specify the AWS Key Management Service (KMS) Key ID, or KMSKeyId, -// for at rest data encryption. You can turn OnlineStore on or off by specifying -// the EnableOnlineStore flag at General Assembly; the default value is False. 
+// Use this to specify the Amazon Web Services Key Management Service (KMS) +// Key ID, or KMSKeyId, for at rest data encryption. You can turn OnlineStore +// on or off by specifying the EnableOnlineStore flag at General Assembly; the +// default value is False. type OnlineStoreConfig struct { _ struct{} `type:"structure"` @@ -64996,9 +65105,9 @@ func (s *OnlineStoreConfig) SetSecurityConfig(v *OnlineStoreSecurityConfig) *Onl type OnlineStoreSecurityConfig struct { _ struct{} `type:"structure"` - // The ID of the AWS Key Management Service (AWS KMS) key that SageMaker Feature - // Store uses to encrypt the Amazon S3 objects at rest using Amazon S3 server-side - // encryption. + // The ID of the Amazon Web Services Key Management Service (Amazon Web Services + // KMS) key that SageMaker Feature Store uses to encrypt the Amazon S3 objects + // at rest using Amazon S3 server-side encryption. // // The caller (either IAM user or IAM role) of CreateFeatureGroup must have // below permissions to the OnlineStore KmsKeyId: @@ -65234,9 +65343,10 @@ func (s *OutputConfig) SetTargetPlatform(v *TargetPlatform) *OutputConfig { type OutputDataConfig struct { _ struct{} `type:"structure"` - // The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to - // encrypt the model artifacts at rest using Amazon S3 server-side encryption. - // The KmsKeyId can be any of the following formats: + // The Amazon Web Services Key Management Service (Amazon Web Services KMS) + // key that Amazon SageMaker uses to encrypt the model artifacts at rest using + // Amazon S3 server-side encryption. The KmsKeyId can be any of the following + // formats: // // * // KMS Key ID "1234abcd-12ab-34cd-56ef-1234567890ab" // @@ -65258,8 +65368,9 @@ type OutputDataConfig struct { // // The KMS key policy must grant permission to the IAM role that you specify // in your CreateTrainingJob, CreateTransformJob, or CreateHyperParameterTuningJob - // requests. 
For more information, see Using Key Policies in AWS KMS (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html) - // in the AWS Key Management Service Developer Guide. + // requests. For more information, see Using Key Policies in Amazon Web Services + // KMS (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html) + // in the Amazon Web Services Key Management Service Developer Guide. KmsKeyId *string `type:"string"` // Identifies the S3 path where you want Amazon SageMaker to store the model @@ -66258,9 +66369,9 @@ type ProcessingClusterConfig struct { // InstanceType is a required field InstanceType *string `type:"string" required:"true" enum:"ProcessingInstanceType"` - // The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to - // encrypt data on the storage volume attached to the ML compute instance(s) - // that run the processing job. + // The Amazon Web Services Key Management Service (Amazon Web Services KMS) + // key that Amazon SageMaker uses to encrypt data on the storage volume attached + // to the ML compute instance(s) that run the processing job. // // Certain Nitro-based instances include local storage, dependent on the instance // type. Local storage volumes are encrypted using a hardware module on the @@ -66558,7 +66669,7 @@ type ProcessingJob struct { // An array of key-value pairs. For more information, see Using Cost Allocation // Tags (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html#allocation-whatURL) - // in the AWS Billing and Cost Management User Guide. + // in the Amazon Web Services Billing and Cost Management User Guide. Tags []*Tag `type:"list"` // The ARN of the training job associated with this processing job. 
@@ -66912,10 +67023,10 @@ func (s *ProcessingOutput) SetS3Output(v *ProcessingS3Output) *ProcessingOutput type ProcessingOutputConfig struct { _ struct{} `type:"structure"` - // The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to - // encrypt the processing job output. KmsKeyId can be an ID of a KMS key, ARN - // of a KMS key, alias of a KMS key, or alias of a KMS key. The KmsKeyId is - // applied to all outputs. + // The Amazon Web Services Key Management Service (Amazon Web Services KMS) + // key that Amazon SageMaker uses to encrypt the processing job output. KmsKeyId + // can be an ID of a KMS key, ARN of a KMS key, alias of a KMS key, or alias + // of a KMS key. The KmsKeyId is applied to all outputs. KmsKeyId *string `type:"string"` // An array of outputs configuring the data to upload from the processing container. @@ -67376,9 +67487,10 @@ type ProductionVariantCoreDumpConfig struct { // DestinationS3Uri is a required field DestinationS3Uri *string `type:"string" required:"true"` - // The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to - // encrypt the core dump data at rest using Amazon S3 server-side encryption. - // The KmsKeyId can be any of the following formats: + // The Amazon Web Services Key Management Service (Amazon Web Services KMS) + // key that Amazon SageMaker uses to encrypt the core dump data at rest using + // Amazon S3 server-side encryption. The KmsKeyId can be any of the following + // formats: // // * // KMS Key ID "1234abcd-12ab-34cd-56ef-1234567890ab" // @@ -67400,8 +67512,8 @@ type ProductionVariantCoreDumpConfig struct { // // The KMS key policy must grant permission to the IAM role that you specify // in your CreateEndpoint and UpdateEndpoint requests. For more information, - // see Using Key Policies in AWS KMS (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html) - // in the AWS Key Management Service Developer Guide. 
+ // see Using Key Policies in Amazon Web Services KMS (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html) + // in the Amazon Web Services Key Management Service Developer Guide. KmsKeyId *string `type:"string"` } @@ -67950,7 +68062,7 @@ func (s *PropertyNameSuggestion) SetPropertyName(v string) *PropertyNameSuggesti } // A key value pair used when you provision a project as a service catalog product. -// For information, see What is AWS Service Catalog (https://docs.aws.amazon.com/servicecatalog/latest/adminguide/introduction.html). +// For information, see What is Amazon Web Services Service Catalog (https://docs.aws.amazon.com/servicecatalog/latest/adminguide/introduction.html). type ProvisioningParameter struct { _ struct{} `type:"structure"` @@ -68333,8 +68445,8 @@ type RedshiftDatasetDefinition struct { // DbUser is a required field DbUser *string `min:"1" type:"string" required:"true"` - // The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to - // encrypt data from a Redshift execution. + // The Amazon Web Services Key Management Service (Amazon Web Services KMS) + // key that Amazon SageMaker uses to encrypt data from a Redshift execution. KmsKeyId *string `type:"string"` // The compression used for Redshift query results. @@ -68802,11 +68914,12 @@ func (s *RenderingError) SetMessage(v string) *RenderingError { type RepositoryAuthConfig struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) of an AWS Lambda function that provides credentials - // to authenticate to the private Docker registry where your model image is - // hosted. For information about how to create an AWS Lambda function, see Create - // a Lambda function with the console (https://docs.aws.amazon.com/lambda/latest/dg/getting-started-create-function.html) - // in the AWS Lambda Developer Guide. 
+ // The Amazon Resource Name (ARN) of an Amazon Web Services Lambda function + // that provides credentials to authenticate to the private Docker registry + // where your model image is hosted. For information about how to create an + // Amazon Web Services Lambda function, see Create a Lambda function with the + // console (https://docs.aws.amazon.com/lambda/latest/dg/getting-started-create-function.html) + // in the Amazon Web Services Lambda Developer Guide. // // RepositoryCredentialsProviderArn is a required field RepositoryCredentialsProviderArn *string `min:"1" type:"string" required:"true"` @@ -68903,8 +69016,9 @@ type ResourceConfig struct { // InstanceType is a required field InstanceType *string `type:"string" required:"true" enum:"TrainingInstanceType"` - // The AWS KMS key that Amazon SageMaker uses to encrypt data on the storage - // volume attached to the ML compute instance(s) that run the training job. + // The Amazon Web Services KMS key that Amazon SageMaker uses to encrypt data + // on the storage volume attached to the ML compute instance(s) that run the + // training job. // // Certain Nitro-based instances include local storage, dependent on the instance // type. Local storage volumes are encrypted using a hardware module on the @@ -69478,8 +69592,8 @@ func (s *S3DataSource) SetS3Uri(v string) *S3DataSource { type S3StorageConfig struct { _ struct{} `type:"structure"` - // The AWS Key Management Service (KMS) key ID of the key used to encrypt any - // objects written into the OfflineStore S3 location. + // The Amazon Web Services Key Management Service (KMS) key ID of the key used + // to encrypt any objects written into the OfflineStore S3 location. // // The IAM roleARN that is passed as a parameter to CreateFeatureGroup must // have below permissions to the KmsKeyId: @@ -70301,7 +70415,7 @@ func (s *SendPipelineExecutionStepSuccessOutput) SetPipelineExecutionArn(v strin } // Details of a provisioned service catalog product. 
For information about service -// catalog, see What is AWS Service Catalog (https://docs.aws.amazon.com/servicecatalog/latest/adminguide/introduction.html). +// catalog, see What is Amazon Web Services Service Catalog (https://docs.aws.amazon.com/servicecatalog/latest/adminguide/introduction.html). type ServiceCatalogProvisionedProductDetails struct { _ struct{} `type:"structure"` @@ -70355,7 +70469,7 @@ func (s *ServiceCatalogProvisionedProductDetails) SetProvisionedProductStatusMes } // Details that you specify to provision a service catalog product. For information -// about service catalog, see .What is AWS Service Catalog (https://docs.aws.amazon.com/servicecatalog/latest/adminguide/introduction.html). +// about service catalog, see .What is Amazon Web Services Service Catalog (https://docs.aws.amazon.com/servicecatalog/latest/adminguide/introduction.html). type ServiceCatalogProvisioningDetails struct { _ struct{} `type:"structure"` @@ -70457,9 +70571,9 @@ type SharingSettings struct { // default is Disabled. NotebookOutputOption *string `type:"string" enum:"NotebookOutputOption"` - // When NotebookOutputOption is Allowed, the AWS Key Management Service (KMS) - // encryption key ID used to encrypt the notebook cell output in the Amazon - // S3 bucket. + // When NotebookOutputOption is Allowed, the Amazon Web Services Key Management + // Service (KMS) encryption key ID used to encrypt the notebook cell output + // in the Amazon S3 bucket. S3KmsKeyId *string `type:"string"` // When NotebookOutputOption is Allowed, the Amazon S3 bucket used to store @@ -70549,13 +70663,13 @@ func (s *ShuffleConfig) SetSeed(v int64) *ShuffleConfig { // Specifies an algorithm that was used to create the model package. The algorithm // must be either an algorithm resource in your Amazon SageMaker account or -// an algorithm in AWS Marketplace that you are subscribed to. +// an algorithm in Amazon Web Services Marketplace that you are subscribed to. 
type SourceAlgorithm struct { _ struct{} `type:"structure"` // The name of an algorithm that was used to create the model package. The algorithm // must be either an algorithm resource in your Amazon SageMaker account or - // an algorithm in AWS Marketplace that you are subscribed to. + // an algorithm in Amazon Web Services Marketplace that you are subscribed to. // // AlgorithmName is a required field AlgorithmName *string `min:"1" type:"string" required:"true"` @@ -71739,18 +71853,18 @@ func (s *SuggestionQuery) SetPropertyNameQuery(v *PropertyNameQuery) *Suggestion } // A tag object that consists of a key and an optional value, used to manage -// metadata for Amazon SageMaker AWS resources. +// metadata for Amazon SageMaker Amazon Web Services resources. // // You can add tags to notebook instances, training jobs, hyperparameter tuning // jobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations, // and endpoints. For more information on adding tags to Amazon SageMaker resources, // see AddTags. // -// For more information on adding metadata to your AWS resources with tagging, -// see Tagging AWS resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). -// For advice on best practices for managing AWS resources with tagging, see -// Tagging Best Practices: Implement an Effective AWS Resource Tagging Strategy -// (https://d1.awsstatic.com/whitepapers/aws-tagging-best-practices.pdf). +// For more information on adding metadata to your Amazon Web Services resources +// with tagging, see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). +// For advice on best practices for managing Amazon Web Services resources with +// tagging, see Tagging Best Practices: Implement an Effective Amazon Web Services +// Resource Tagging Strategy (https://d1.awsstatic.com/whitepapers/aws-tagging-best-practices.pdf). 
type Tag struct { _ struct{} `type:"structure"` @@ -72125,8 +72239,8 @@ type TrainingJob struct { // The number of times to retry the job when the job fails due to an InternalServerError. RetryStrategy *RetryStrategy `type:"structure"` - // The AWS Identity and Access Management (IAM) role configured for the training - // job. + // The Amazon Web Services Identity and Access Management (IAM) role configured + // for the training job. RoleArn *string `min:"20" type:"string"` // Provides detailed information about the state of the training job. For detailed @@ -72194,9 +72308,10 @@ type TrainingJob struct { // window to save the model artifacts, so the results of training are not lost. StoppingCondition *StoppingCondition `type:"structure"` - // An array of key-value pairs. You can use tags to categorize your AWS resources - // in different ways, for example, by purpose, owner, or environment. For more - // information, see Tagging AWS Resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). + // An array of key-value pairs. You can use tags to categorize your Amazon Web + // Services resources in different ways, for example, by purpose, owner, or + // environment. For more information, see Tagging Amazon Web Services Resources + // (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). Tags []*Tag `type:"list"` // Configuration of storage locations for the Debugger TensorBoard output data. @@ -73628,9 +73743,10 @@ type TransformOutput struct { // every transformed record, specify Line. AssembleWith *string `type:"string" enum:"AssemblyType"` - // The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to - // encrypt the model artifacts at rest using Amazon S3 server-side encryption. 
- // The KmsKeyId can be any of the following formats: + // The Amazon Web Services Key Management Service (Amazon Web Services KMS) + // key that Amazon SageMaker uses to encrypt the model artifacts at rest using + // Amazon S3 server-side encryption. The KmsKeyId can be any of the following + // formats: // // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab // @@ -73647,8 +73763,8 @@ type TransformOutput struct { // // The KMS key policy must grant permission to the IAM role that you specify // in your CreateModel request. For more information, see Using Key Policies - // in AWS KMS (http://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html) - // in the AWS Key Management Service Developer Guide. + // in Amazon Web Services KMS (http://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html) + // in the Amazon Web Services Key Management Service Developer Guide. KmsKeyId *string `type:"string"` // The Amazon S3 path where you want Amazon SageMaker to store the results of @@ -73735,9 +73851,9 @@ type TransformResources struct { // InstanceType is a required field InstanceType *string `type:"string" required:"true" enum:"TransformInstanceType"` - // The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to - // encrypt model data on the storage volume attached to the ML compute instance(s) - // that run the batch transform job. + // The Amazon Web Services Key Management Service (Amazon Web Services KMS) + // key that Amazon SageMaker uses to encrypt model data on the storage volume + // attached to the ML compute instance(s) that run the batch transform job. // // Certain Nitro-based instances include local storage, dependent on the instance // type. Local storage volumes are encrypted using a hardware module on the @@ -74896,8 +75012,8 @@ type UiConfig struct { // Use this parameter when you are creating a labeling job for 3D point cloud // and video fram labeling jobs. 
Use your labeling job task type to select one // of the following ARNs and use it with this parameter when you create a labeling - // job. Replace aws-region with the AWS region you are creating your labeling - // job in. + // job. Replace aws-region with the Amazon Web Services region you are creating + // your labeling job in. // // 3D Point Cloud HumanTaskUiArns // @@ -75308,9 +75424,9 @@ type UpdateCodeRepositoryInput struct { CodeRepositoryName *string `min:"1" type:"string" required:"true"` // The configuration of the git repository, including the URL and the Amazon - // Resource Name (ARN) of the AWS Secrets Manager secret that contains the credentials - // used to access the repository. The secret must have a staging label of AWSCURRENT - // and must be in the following format: + // Resource Name (ARN) of the Amazon Web Services Secrets Manager secret that + // contains the credentials used to access the repository. The secret must have + // a staging label of AWSCURRENT and must be in the following format: // // {"username": UserName, "password": Password} GitConfig *GitConfigForUpdate `type:"structure"` @@ -75486,8 +75602,8 @@ type UpdateDeviceFleetInput struct { // DeviceFleetName is a required field DeviceFleetName *string `min:"1" type:"string" required:"true"` - // Whether to create an AWS IoT Role Alias during device fleet creation. The - // name of the role alias generated will match this pattern: "SageMakerEdge-{DeviceFleetName}". + // Whether to create an Amazon Web Services IoT Role Alias during device fleet + // creation. The name of the role alias generated will match this pattern: "SageMakerEdge-{DeviceFleetName}". // // For example, if your device fleet is called "demo-fleet", the name of the // role alias will be "SageMakerEdge-demo-fleet". 
@@ -76254,8 +76370,8 @@ type UpdateMonitoringScheduleInput struct { // MonitoringScheduleConfig is a required field MonitoringScheduleConfig *MonitoringScheduleConfig `type:"structure" required:"true"` - // The name of the monitoring schedule. The name must be unique within an AWS - // Region within an AWS account. + // The name of the monitoring schedule. The name must be unique within an Amazon + // Web Services Region within an Amazon Web Services account. // // MonitoringScheduleName is a required field MonitoringScheduleName *string `min:"1" type:"string" required:"true"` @@ -76343,18 +76459,19 @@ type UpdateNotebookInstanceInput struct { // An array of up to three Git repositories to associate with the notebook instance. // These can be either the names of Git repositories stored as resources in - // your account, or the URL of Git repositories in AWS CodeCommit (https://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html) - // or in any other Git repository. These repositories are cloned at the same - // level as the default repository of your notebook instance. For more information, + // your account, or the URL of Git repositories in Amazon Web Services CodeCommit + // (https://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html) or + // in any other Git repository. These repositories are cloned at the same level + // as the default repository of your notebook instance. For more information, // see Associating Git Repositories with Amazon SageMaker Notebook Instances // (https://docs.aws.amazon.com/sagemaker/latest/dg/nbi-git-repo.html). AdditionalCodeRepositories []*string `type:"list"` // The Git repository to associate with the notebook instance as its default // code repository. This can be either the name of a Git repository stored as - // a resource in your account, or the URL of a Git repository in AWS CodeCommit - // (https://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html) or - // in any other Git repository. 
When you open a notebook instance, it opens + // a resource in your account, or the URL of a Git repository in Amazon Web + // Services CodeCommit (https://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html) + // or in any other Git repository. When you open a notebook instance, it opens // in the directory that contains this repository. For more information, see // Associating Git Repositories with Amazon SageMaker Notebook Instances (https://docs.aws.amazon.com/sagemaker/latest/dg/nbi-git-repo.html). DefaultCodeRepository *string `min:"1" type:"string"` @@ -77347,9 +77464,10 @@ type UpdateWorkforceOutput struct { _ struct{} `type:"structure"` // A single private workforce. You can create one private work force in each - // AWS Region. By default, any workforce-related API operation used in a specific - // region will apply to the workforce created in that region. To learn how to - // create a private workforce, see Create a Private Workforce (https://docs.aws.amazon.com/sagemaker/latest/dg/sms-workforce-create-private.html). + // Amazon Web Services Region. By default, any workforce-related API operation + // used in a specific region will apply to the workforce created in that region. + // To learn how to create a private workforce, see Create a Private Workforce + // (https://docs.aws.amazon.com/sagemaker/latest/dg/sms-workforce-create-private.html). // // Workforce is a required field Workforce *Workforce `type:"structure" required:"true"` @@ -77824,9 +77942,10 @@ func (s *VpcConfig) SetSubnets(v []*string) *VpcConfig { // A single private workforce, which is automatically created when you create // your first private work team. You can create one private work force in each -// AWS Region. By default, any workforce-related API operation used in a specific -// region will apply to the workforce created in that region. 
To learn how to -// create a private workforce, see Create a Private Workforce (https://docs.aws.amazon.com/sagemaker/latest/dg/sms-workforce-create-private.html). +// Amazon Web Services Region. By default, any workforce-related API operation +// used in a specific region will apply to the workforce created in that region. +// To learn how to create a private workforce, see Create a Private Workforce +// (https://docs.aws.amazon.com/sagemaker/latest/dg/sms-workforce-create-private.html). type Workforce struct { _ struct{} `type:"structure"` diff --git a/service/sqs/api.go b/service/sqs/api.go index d58d7c03401..a07d209e035 100644 --- a/service/sqs/api.go +++ b/service/sqs/api.go @@ -64,12 +64,12 @@ func (c *SQS) AddPermissionRequest(input *AddPermissionInput) (req *request.Requ // Only you, the owner of the queue, can grant or deny permissions to the queue. // For more information about these permissions, see Allow Developers to Write // Messages to a Shared Queue (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-writing-an-sqs-policy.html#write-messages-to-shared-queue) -// in the Amazon Simple Queue Service Developer Guide. +// in the Amazon SQS Developer Guide. // // * AddPermission generates a policy for you. You can use SetQueueAttributes // to upload your policy. For more information, see Using Custom Policies // with the Amazon SQS Access Policy Language (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-creating-custom-policies.html) -// in the Amazon Simple Queue Service Developer Guide. +// in the Amazon SQS Developer Guide. // // * An Amazon SQS policy can have a maximum of 7 actions. // @@ -87,7 +87,7 @@ func (c *SQS) AddPermissionRequest(input *AddPermissionInput) (req *request.Requ // // Cross-account permissions don't apply to this action. 
For more information, // see Grant cross-account permissions to a role and a user name (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-customer-managed-policy-examples.html#grant-cross-account-permissions-to-role-and-user-name) -// in the Amazon Simple Queue Service Developer Guide. +// in the Amazon SQS Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -174,7 +174,7 @@ func (c *SQS) ChangeMessageVisibilityRequest(input *ChangeMessageVisibilityInput // value. The default visibility timeout for a message is 30 seconds. The minimum // is 0 seconds. The maximum is 12 hours. For more information, see Visibility // Timeout (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-visibility-timeout.html) -// in the Amazon Simple Queue Service Developer Guide. +// in the Amazon SQS Developer Guide. // // For example, you have a message with a visibility timeout of 5 minutes. After // 3 minutes, you call ChangeMessageVisibility with a timeout of 10 minutes. @@ -415,7 +415,7 @@ func (c *SQS) CreateQueueRequest(input *CreateQueueInput) (req *request.Request, // create a new FIFO queue for your application or delete your existing standard // queue and recreate it as a FIFO queue. For more information, see Moving // From a Standard Queue to a FIFO Queue (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html#FIFO-queues-moving) -// in the Amazon Simple Queue Service Developer Guide. +// in the Amazon SQS Developer Guide. // // * If you don't provide a value for an attribute, the queue is created // with the default value for the attribute. @@ -450,7 +450,7 @@ func (c *SQS) CreateQueueRequest(input *CreateQueueInput) (req *request.Request, // // Cross-account permissions don't apply to this action. 
For more information, // see Grant cross-account permissions to a role and a user name (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-customer-managed-policy-examples.html#grant-cross-account-permissions-to-role-and-user-name) -// in the Amazon Simple Queue Service Developer Guide. +// in the Amazon SQS Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -755,7 +755,7 @@ func (c *SQS) DeleteQueueRequest(input *DeleteQueueInput) (req *request.Request, // // Cross-account permissions don't apply to this action. For more information, // see Grant cross-account permissions to a role and a user name (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-customer-managed-policy-examples.html#grant-cross-account-permissions-to-role-and-user-name) -// in the Amazon Simple Queue Service Developer Guide. +// in the Amazon SQS Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -918,7 +918,7 @@ func (c *SQS) GetQueueUrlRequest(input *GetQueueUrlInput) (req *request.Request, // must grant you permission to access the queue. For more information about // shared queue access, see AddPermission or see Allow Developers to Write Messages // to a Shared Queue (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-writing-an-sqs-policy.html#write-messages-to-shared-queue) -// in the Amazon Simple Queue Service Developer Guide. +// in the Amazon SQS Developer Guide. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1016,7 +1016,7 @@ func (c *SQS) ListDeadLetterSourceQueuesRequest(input *ListDeadLetterSourceQueue // // For more information about using dead-letter queues, see Using Amazon SQS // Dead-Letter Queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-dead-letter-queues.html) -// in the Amazon Simple Queue Service Developer Guide. +// in the Amazon SQS Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1149,11 +1149,11 @@ func (c *SQS) ListQueueTagsRequest(input *ListQueueTagsInput) (req *request.Requ // // List all cost allocation tags added to the specified Amazon SQS queue. For // an overview, see Tagging Your Amazon SQS Queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-queue-tags.html) -// in the Amazon Simple Queue Service Developer Guide. +// in the Amazon SQS Developer Guide. // // Cross-account permissions don't apply to this action. For more information, // see Grant cross-account permissions to a role and a user name (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-customer-managed-policy-examples.html#grant-cross-account-permissions-to-role-and-user-name) -// in the Amazon Simple Queue Service Developer Guide. +// in the Amazon SQS Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1247,7 +1247,7 @@ func (c *SQS) ListQueuesRequest(input *ListQueuesInput) (req *request.Request, o // // Cross-account permissions don't apply to this action. 
For more information, // see Grant cross-account permissions to a role and a user name (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-customer-managed-policy-examples.html#grant-cross-account-permissions-to-role-and-user-name) -// in the Amazon Simple Queue Service Developer Guide. +// in the Amazon SQS Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1473,7 +1473,7 @@ func (c *SQS) ReceiveMessageRequest(input *ReceiveMessageInput) (req *request.Re // Retrieves one or more messages (up to 10), from the specified queue. Using // the WaitTimeSeconds parameter enables long-poll support. For more information, // see Amazon SQS Long Polling (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-long-polling.html) -// in the Amazon Simple Queue Service Developer Guide. +// in the Amazon SQS Developer Guide. // // Short poll is the default behavior where a weighted random set of machines // is sampled on a ReceiveMessage call. Thus, only the messages on the sampled @@ -1500,14 +1500,14 @@ func (c *SQS) ReceiveMessageRequest(input *ReceiveMessageInput) (req *request.Re // // The receipt handle is the identifier you must provide when deleting the message. // For more information, see Queue and Message Identifiers (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-queue-message-identifiers.html) -// in the Amazon Simple Queue Service Developer Guide. +// in the Amazon SQS Developer Guide. // // You can provide the VisibilityTimeout parameter in your request. The parameter // is applied to the messages that Amazon SQS returns in the response. If you // don't include the parameter, the overall visibility timeout for the queue // is used for the returned messages. 
For more information, see Visibility Timeout // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-visibility-timeout.html) -// in the Amazon Simple Queue Service Developer Guide. +// in the Amazon SQS Developer Guide. // // A message that isn't deleted or a message whose visibility isn't extended // before the visibility timeout expires counts as a failed receive. Depending @@ -1606,7 +1606,7 @@ func (c *SQS) RemovePermissionRequest(input *RemovePermissionInput) (req *reques // // * Cross-account permissions don't apply to this action. For more information, // see Grant cross-account permissions to a role and a user name (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-customer-managed-policy-examples.html#grant-cross-account-permissions-to-role-and-user-name) -// in the Amazon Simple Queue Service Developer Guide. +// in the Amazon SQS Developer Guide. // // * To remove the ability to change queue permissions, you must deny permission // to the AddPermission, RemovePermission, and SetQueueAttributes actions @@ -1910,7 +1910,7 @@ func (c *SQS) SetQueueAttributesRequest(input *SetQueueAttributesInput) (req *re // // * Cross-account permissions don't apply to this action. For more information, // see Grant cross-account permissions to a role and a user name (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-customer-managed-policy-examples.html#grant-cross-account-permissions-to-role-and-user-name) -// in the Amazon Simple Queue Service Developer Guide. +// in the Amazon SQS Developer Guide. // // * To remove the ability to change queue permissions, you must deny permission // to the AddPermission, RemovePermission, and SetQueueAttributes actions @@ -1996,7 +1996,7 @@ func (c *SQS) TagQueueRequest(input *TagQueueInput) (req *request.Request, outpu // // Add cost allocation tags to the specified Amazon SQS queue. 
For an overview, // see Tagging Your Amazon SQS Queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-queue-tags.html) -// in the Amazon Simple Queue Service Developer Guide. +// in the Amazon SQS Developer Guide. // // When you use queue tags, keep the following guidelines in mind: // @@ -2010,12 +2010,12 @@ func (c *SQS) TagQueueRequest(input *TagQueueInput) (req *request.Request, outpu // * A new tag with a key identical to that of an existing tag overwrites // the existing tag. // -// For a full list of tag restrictions, see Limits Related to Queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-limits.html#limits-queues) -// in the Amazon Simple Queue Service Developer Guide. +// For a full list of tag restrictions, see Quotas related to queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-limits.html#limits-queues) +// in the Amazon SQS Developer Guide. // // Cross-account permissions don't apply to this action. For more information, // see Grant cross-account permissions to a role and a user name (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-customer-managed-policy-examples.html#grant-cross-account-permissions-to-role-and-user-name) -// in the Amazon Simple Queue Service Developer Guide. +// in the Amazon SQS Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2092,11 +2092,11 @@ func (c *SQS) UntagQueueRequest(input *UntagQueueInput) (req *request.Request, o // // Remove cost allocation tags from the specified Amazon SQS queue. For an overview, // see Tagging Your Amazon SQS Queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-queue-tags.html) -// in the Amazon Simple Queue Service Developer Guide. +// in the Amazon SQS Developer Guide. 
// // Cross-account permissions don't apply to this action. For more information, // see Grant cross-account permissions to a role and a user name (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-customer-managed-policy-examples.html#grant-cross-account-permissions-to-role-and-user-name) -// in the Amazon Simple Queue Service Developer Guide. +// in the Amazon SQS Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2129,11 +2129,10 @@ func (c *SQS) UntagQueueWithContext(ctx aws.Context, input *UntagQueueInput, opt type AddPermissionInput struct { _ struct{} `type:"structure"` - // The AWS account number of the principal (https://docs.aws.amazon.com/general/latest/gr/glos-chap.html#P) - // who is given permission. The principal must have an AWS account, but does - // not need to be signed up for Amazon SQS. For information about locating the - // AWS account identification, see Your AWS Identifiers (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-making-api-requests.html#sqs-api-request-authentication) - // in the Amazon Simple Queue Service Developer Guide. + // The account numbers of the principals (https://docs.aws.amazon.com/general/latest/gr/glos-chap.html#P) + // who are to receive permission. For information about locating the account + // identification, see Your Amazon Web Services Identifiers (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-making-api-requests.html#sqs-api-request-authentication) + // in the Amazon SQS Developer Guide. 
// // AWSAccountIds is a required field AWSAccountIds []*string `locationNameList:"AWSAccountId" type:"list" flattened:"true" required:"true"` @@ -2143,7 +2142,7 @@ type AddPermissionInput struct { // // For more information about these actions, see Overview of Managing Access // Permissions to Your Amazon Simple Queue Service Resource (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-overview-of-managing-access.html) - // in the Amazon Simple Queue Service Developer Guide. + // in the Amazon SQS Developer Guide. // // Specifying SendMessage, DeleteMessage, or ChangeMessageVisibility for ActionName.n // also grants permissions for the corresponding batch versions of those actions: @@ -2607,8 +2606,9 @@ type CreateQueueInput struct { // Amazon SQS retains a message. Valid values: An integer from 60 seconds // (1 minute) to 1,209,600 seconds (14 days). Default: 345,600 (4 days). // - // * Policy – The queue's policy. A valid AWS policy. For more information - // about policy structure, see Overview of AWS IAM Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/PoliciesOverview.html) + // * Policy – The queue's policy. A valid Amazon Web Services policy. For + // more information about policy structure, see Overview of Amazon Web Services + // IAM Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/PoliciesOverview.html) // in the Amazon IAM User Guide. // // * ReceiveMessageWaitTimeSeconds – The length of time, in seconds, for @@ -2619,9 +2619,9 @@ type CreateQueueInput struct { // queue functionality of the source queue as a JSON object. For more information // about the redrive policy and dead-letter queues, see Using Amazon SQS // Dead-Letter Queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-dead-letter-queues.html) - // in the Amazon Simple Queue Service Developer Guide. 
deadLetterTargetArn - // – The Amazon Resource Name (ARN) of the dead-letter queue to which Amazon - // SQS moves messages after the value of maxReceiveCount is exceeded. maxReceiveCount + // in the Amazon SQS Developer Guide. deadLetterTargetArn – The Amazon + // Resource Name (ARN) of the dead-letter queue to which Amazon SQS moves + // messages after the value of maxReceiveCount is exceeded. maxReceiveCount // – The number of times a message is delivered to the source queue before // being moved to the dead-letter queue. When the ReceiveCount for a message // exceeds the maxReceiveCount for a queue, Amazon SQS moves the message @@ -2633,25 +2633,26 @@ type CreateQueueInput struct { // Valid values: An integer from 0 to 43,200 (12 hours). Default: 30. For // more information about the visibility timeout, see Visibility Timeout // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-visibility-timeout.html) - // in the Amazon Simple Queue Service Developer Guide. + // in the Amazon SQS Developer Guide. // // The following attributes apply only to server-side-encryption (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html): // - // * KmsMasterKeyId – The ID of an AWS-managed customer master key (CMK) - // for Amazon SQS or a custom CMK. For more information, see Key Terms (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html#sqs-sse-key-terms). - // While the alias of the AWS-managed CMK for Amazon SQS is always alias/aws/sqs, - // the alias of a custom CMK can, for example, be alias/MyAlias . For more - // examples, see KeyId (https://docs.aws.amazon.com/kms/latest/APIReference/API_DescribeKey.html#API_DescribeKey_RequestParameters) - // in the AWS Key Management Service API Reference. + // * KmsMasterKeyId – The ID of an Amazon Web Services managed customer + // master key (CMK) for Amazon SQS or a custom CMK. 
For more information, + // see Key Terms (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html#sqs-sse-key-terms). + // While the alias of the Amazon Web Services managed CMK for Amazon SQS + // is always alias/aws/sqs, the alias of a custom CMK can, for example, be + // alias/MyAlias . For more examples, see KeyId (https://docs.aws.amazon.com/kms/latest/APIReference/API_DescribeKey.html#API_DescribeKey_RequestParameters) + // in the Key Management Service API Reference. // // * KmsDataKeyReusePeriodSeconds – The length of time, in seconds, for // which Amazon SQS can reuse a data key (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#data-keys) - // to encrypt or decrypt messages before calling AWS KMS again. An integer - // representing seconds, between 60 seconds (1 minute) and 86,400 seconds - // (24 hours). Default: 300 (5 minutes). A shorter time period provides better - // security but results in more calls to KMS which might incur charges after - // Free Tier. For more information, see How Does the Data Key Reuse Period - // Work? (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html#sqs-how-does-the-data-key-reuse-period-work). + // to encrypt or decrypt messages before calling KMS again. An integer representing + // seconds, between 60 seconds (1 minute) and 86,400 seconds (24 hours). + // Default: 300 (5 minutes). A shorter time period provides better security + // but results in more calls to KMS which might incur charges after Free + // Tier. For more information, see How Does the Data Key Reuse Period Work? + // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html#sqs-how-does-the-data-key-reuse-period-work). 
// // The following attributes apply only to FIFO (first-in-first-out) queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html): // @@ -2661,16 +2662,16 @@ type CreateQueueInput struct { // You can't change it for an existing queue. When you set this attribute, // you must also provide the MessageGroupId for your messages explicitly. // For more information, see FIFO queue logic (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues-understanding-logic.html) - // in the Amazon Simple Queue Service Developer Guide. + // in the Amazon SQS Developer Guide. // // * ContentBasedDeduplication – Enables content-based deduplication. Valid // values are true and false. For more information, see Exactly-once processing // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues-exactly-once-processing.html) - // in the Amazon Simple Queue Service Developer Guide. Note the following: - // Every message must have a unique MessageDeduplicationId. You may provide - // a MessageDeduplicationId explicitly. If you aren't able to provide a MessageDeduplicationId - // and you enable ContentBasedDeduplication for your queue, Amazon SQS uses - // a SHA-256 hash to generate the MessageDeduplicationId using the body of + // in the Amazon SQS Developer Guide. Note the following: Every message must + // have a unique MessageDeduplicationId. You may provide a MessageDeduplicationId + // explicitly. If you aren't able to provide a MessageDeduplicationId and + // you enable ContentBasedDeduplication for your queue, Amazon SQS uses a + // SHA-256 hash to generate the MessageDeduplicationId using the body of // the message (but not the attributes of the message). If you don't provide // a MessageDeduplicationId and the queue doesn't have ContentBasedDeduplication // set, the action fails with an error. 
If the queue has ContentBasedDeduplication @@ -2704,7 +2705,7 @@ type CreateQueueInput struct { // as specified. // // For information on throughput quotas, see Quotas related to messages (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/quotas-messages.html) - // in the Amazon Simple Queue Service Developer Guide. + // in the Amazon SQS Developer Guide. Attributes map[string]*string `locationName:"Attribute" locationNameKey:"Name" locationNameValue:"Value" type:"map" flattened:"true"` // The name of the new queue. The following limits apply to this name: @@ -2723,7 +2724,7 @@ type CreateQueueInput struct { // Add cost allocation tags to the specified Amazon SQS queue. For an overview, // see Tagging Your Amazon SQS Queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-queue-tags.html) - // in the Amazon Simple Queue Service Developer Guide. + // in the Amazon SQS Developer Guide. // // When you use queue tags, keep the following guidelines in mind: // @@ -2737,15 +2738,15 @@ type CreateQueueInput struct { // * A new tag with a key identical to that of an existing tag overwrites // the existing tag. // - // For a full list of tag restrictions, see Limits Related to Queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-limits.html#limits-queues) - // in the Amazon Simple Queue Service Developer Guide. + // For a full list of tag restrictions, see Quotas related to queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-limits.html#limits-queues) + // in the Amazon SQS Developer Guide. // // To be able to tag a queue on creation, you must have the sqs:CreateQueue // and sqs:TagQueue permissions. // // Cross-account permissions don't apply to this action. 
For more information, // see Grant cross-account permissions to a role and a user name (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-customer-managed-policy-examples.html#grant-cross-account-permissions-to-role-and-user-name) - // in the Amazon Simple Queue Service Developer Guide. + // in the Amazon SQS Developer Guide. Tags map[string]*string `locationName:"Tag" locationNameKey:"Key" locationNameValue:"Value" type:"map" flattened:"true"` } @@ -3129,6 +3130,9 @@ type GetQueueAttributesInput struct { // A list of attributes for which to retrieve information. // + // The AttributeName.N parameter is optional, but if you don't specify values + // for this parameter, the request returns empty results. + // // In the future, new attributes might be added. If you write code that calls // this action, we recommend that you structure your code so that it can handle // new attributes gracefully. @@ -3180,9 +3184,9 @@ type GetQueueAttributesInput struct { // queue functionality of the source queue as a JSON object. For more information // about the redrive policy and dead-letter queues, see Using Amazon SQS // Dead-Letter Queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-dead-letter-queues.html) - // in the Amazon Simple Queue Service Developer Guide. deadLetterTargetArn - // – The Amazon Resource Name (ARN) of the dead-letter queue to which Amazon - // SQS moves messages after the value of maxReceiveCount is exceeded. maxReceiveCount + // in the Amazon SQS Developer Guide. deadLetterTargetArn – The Amazon + // Resource Name (ARN) of the dead-letter queue to which Amazon SQS moves + // messages after the value of maxReceiveCount is exceeded. maxReceiveCount // – The number of times a message is delivered to the source queue before // being moved to the dead-letter queue. 
When the ReceiveCount for a message // exceeds the maxReceiveCount for a queue, Amazon SQS moves the message @@ -3191,31 +3195,31 @@ type GetQueueAttributesInput struct { // * VisibilityTimeout – Returns the visibility timeout for the queue. // For more information about the visibility timeout, see Visibility Timeout // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-visibility-timeout.html) - // in the Amazon Simple Queue Service Developer Guide. + // in the Amazon SQS Developer Guide. // // The following attributes apply only to server-side-encryption (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html): // - // * KmsMasterKeyId – Returns the ID of an AWS-managed customer master - // key (CMK) for Amazon SQS or a custom CMK. For more information, see Key - // Terms (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html#sqs-sse-key-terms). + // * KmsMasterKeyId – Returns the ID of an Amazon Web Services managed + // customer master key (CMK) for Amazon SQS or a custom CMK. For more information, + // see Key Terms (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html#sqs-sse-key-terms). // // * KmsDataKeyReusePeriodSeconds – Returns the length of time, in seconds, // for which Amazon SQS can reuse a data key to encrypt or decrypt messages - // before calling AWS KMS again. For more information, see How Does the Data + // before calling KMS again. For more information, see How Does the Data // Key Reuse Period Work? (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html#sqs-how-does-the-data-key-reuse-period-work). 
// // The following attributes apply only to FIFO (first-in-first-out) queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html): // // * FifoQueue – Returns information about whether the queue is FIFO. For // more information, see FIFO queue logic (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues-understanding-logic.html) - // in the Amazon Simple Queue Service Developer Guide. To determine whether - // a queue is FIFO (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html), + // in the Amazon SQS Developer Guide. To determine whether a queue is FIFO + // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html), // you can check whether QueueName ends with the .fifo suffix. // // * ContentBasedDeduplication – Returns whether content-based deduplication // is enabled for the queue. For more information, see Exactly-once processing // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues-exactly-once-processing.html) - // in the Amazon Simple Queue Service Developer Guide. + // in the Amazon SQS Developer Guide. // // The following attributes apply only to high throughput for FIFO queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/high-throughput-fifo.html): // @@ -3239,7 +3243,7 @@ type GetQueueAttributesInput struct { // as specified. // // For information on throughput quotas, see Quotas related to messages (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/quotas-messages.html) - // in the Amazon Simple Queue Service Developer Guide. + // in the Amazon SQS Developer Guide. AttributeNames []*string `locationNameList:"AttributeName" type:"list" flattened:"true"` // The URL of the Amazon SQS queue whose attribute information is retrieved. 
@@ -3320,7 +3324,7 @@ type GetQueueUrlInput struct { // QueueName is a required field QueueName *string `type:"string" required:"true"` - // The AWS account ID of the account that created the queue. + // The account ID of the account that created the queue. QueueOwnerAWSAccountId *string `type:"string"` } @@ -3360,7 +3364,7 @@ func (s *GetQueueUrlInput) SetQueueOwnerAWSAccountId(v string) *GetQueueUrlInput } // For more information, see Interpreting Responses (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-api-responses.html) -// in the Amazon Simple Queue Service Developer Guide. +// in the Amazon SQS Developer Guide. type GetQueueUrlOutput struct { _ struct{} `type:"structure"` @@ -3662,12 +3666,12 @@ type Message struct { MD5OfMessageAttributes *string `type:"string"` // Each message attribute consists of a Name, Type, and Value. For more information, - // see Amazon SQS Message Attributes (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-message-metadata.html#sqs-message-attributes) - // in the Amazon Simple Queue Service Developer Guide. + // see Amazon SQS message attributes (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-message-metadata.html#sqs-message-attributes) + // in the Amazon SQS Developer Guide. MessageAttributes map[string]*MessageAttributeValue `locationName:"MessageAttribute" locationNameKey:"Name" locationNameValue:"Value" type:"map" flattened:"true"` // A unique identifier for the message. A MessageIdis considered unique across - // all AWS accounts for an extended period of time. + // all accounts for an extended period of time. MessageId *string `type:"string"` // An identifier associated with the act of receiving the message. A new receipt @@ -3752,7 +3756,7 @@ type MessageAttributeValue struct { // // You can also append custom labels. 
For more information, see Amazon SQS Message // Attributes (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-message-metadata.html#sqs-message-attributes) - // in the Amazon Simple Queue Service Developer Guide. + // in the Amazon SQS Developer Guide. // // DataType is a required field DataType *string `type:"string" required:"true"` @@ -3840,7 +3844,7 @@ type MessageSystemAttributeValue struct { // // You can also append custom labels. For more information, see Amazon SQS Message // Attributes (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-message-metadata.html#sqs-message-attributes) - // in the Amazon Simple Queue Service Developer Guide. + // in the Amazon SQS Developer Guide. // // DataType is a required field DataType *string `type:"string" required:"true"` @@ -3975,7 +3979,7 @@ type ReceiveMessageInput struct { // * ApproximateReceiveCount – Returns the number of times a message has // been received across all queues but not deleted. // - // * AWSTraceHeader – Returns the AWS X-Ray trace header string. + // * AWSTraceHeader – Returns the X-Ray trace header string. // // * SenderId For an IAM user, returns the IAM user ID, for example ABCDEFGHI1JKLMNOPQ23R. // For an IAM role, returns the IAM role ID, for example ABCDE1F2GH3I4JK5LMNOP:i-a123b456. @@ -4052,15 +4056,14 @@ type ReceiveMessageInput struct { // return the same messages and receipt handles. If a retry occurs within // the deduplication interval, it resets the visibility timeout. For more // information, see Visibility Timeout (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-visibility-timeout.html) - // in the Amazon Simple Queue Service Developer Guide. If a caller of the - // ReceiveMessage action still processes messages when the visibility timeout - // expires and messages become visible, another worker consuming from the - // same queue can receive the same messages and therefore process duplicates. 
- // Also, if a consumer whose message processing time is longer than the visibility - // timeout tries to delete the processed messages, the action fails with - // an error. To mitigate this effect, ensure that your application observes - // a safe threshold before the visibility timeout expires and extend the - // visibility timeout as necessary. + // in the Amazon SQS Developer Guide. If a caller of the ReceiveMessage action + // still processes messages when the visibility timeout expires and messages + // become visible, another worker consuming from the same queue can receive + // the same messages and therefore process duplicates. Also, if a consumer + // whose message processing time is longer than the visibility timeout tries + // to delete the processed messages, the action fails with an error. To mitigate + // this effect, ensure that your application observes a safe threshold before + // the visibility timeout expires and extend the visibility timeout as necessary. // // * While messages with a particular MessageGroupId are invisible, no more // messages belonging to the same MessageGroupId are returned until the visibility @@ -4076,7 +4079,7 @@ type ReceiveMessageInput struct { // // For best practices of using ReceiveRequestAttemptId, see Using the ReceiveRequestAttemptId // Request Parameter (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/using-receiverequestattemptid-request-parameter.html) - // in the Amazon Simple Queue Service Developer Guide. + // in the Amazon SQS Developer Guide. ReceiveRequestAttemptId *string `type:"string"` // The duration (in seconds) that the received messages are hidden from subsequent @@ -4383,8 +4386,8 @@ type SendMessageBatchRequestEntry struct { Id *string `type:"string" required:"true"` // Each message attribute consists of a Name, Type, and Value. 
For more information, - // see Amazon SQS Message Attributes (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-message-metadata.html#sqs-message-attributes) - // in the Amazon Simple Queue Service Developer Guide. + // see Amazon SQS message attributes (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-message-metadata.html#sqs-message-attributes) + // in the Amazon SQS Developer Guide. MessageAttributes map[string]*MessageAttributeValue `locationName:"MessageAttribute" locationNameKey:"Name" locationNameValue:"Value" type:"map" flattened:"true"` // The body of the message. @@ -4398,7 +4401,7 @@ type SendMessageBatchRequestEntry struct { // interval. If a message with a particular MessageDeduplicationId is sent successfully, // subsequent messages with the same MessageDeduplicationId are accepted successfully // but aren't delivered. For more information, see Exactly-once processing (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues-exactly-once-processing.html) - // in the Amazon Simple Queue Service Developer Guide. + // in the Amazon SQS Developer Guide. // // * Every message must have a unique MessageDeduplicationId, You may provide // a MessageDeduplicationId explicitly. If you aren't able to provide a MessageDeduplicationId @@ -4433,7 +4436,7 @@ type SendMessageBatchRequestEntry struct { // // For best practices of using MessageDeduplicationId, see Using the MessageDeduplicationId // Property (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/using-messagededuplicationid-property.html) - // in the Amazon Simple Queue Service Developer Guide. + // in the Amazon SQS Developer Guide. MessageDeduplicationId *string `type:"string"` // This parameter applies only to FIFO (first-in-first-out) queues. 
@@ -4458,7 +4461,7 @@ type SendMessageBatchRequestEntry struct { // // For best practices of using MessageGroupId, see Using the MessageGroupId // Property (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/using-messagegroupid-property.html) - // in the Amazon Simple Queue Service Developer Guide. + // in the Amazon SQS Developer Guide. // // MessageGroupId is required for FIFO queues. You can't use it for Standard // queues. @@ -4468,8 +4471,8 @@ type SendMessageBatchRequestEntry struct { // of a Name, Type, and Value. // // * Currently, the only supported message system attribute is AWSTraceHeader. - // Its type must be String and its value must be a correctly formatted AWS - // X-Ray trace header string. + // Its type must be String and its value must be a correctly formatted X-Ray + // trace header string. // // * The size of a message system attribute doesn't count towards the total // size of a message. @@ -4666,8 +4669,8 @@ type SendMessageInput struct { DelaySeconds *int64 `type:"integer"` // Each message attribute consists of a Name, Type, and Value. For more information, - // see Amazon SQS Message Attributes (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-message-metadata.html#sqs-message-attributes) - // in the Amazon Simple Queue Service Developer Guide. + // see Amazon SQS message attributes (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-message-metadata.html#sqs-message-attributes) + // in the Amazon SQS Developer Guide. MessageAttributes map[string]*MessageAttributeValue `locationName:"MessageAttribute" locationNameKey:"Name" locationNameValue:"Value" type:"map" flattened:"true"` // The message to send. The minimum size is one character. The maximum size @@ -4691,7 +4694,7 @@ type SendMessageInput struct { // MessageDeduplicationId are accepted successfully but aren't delivered during // the 5-minute deduplication interval. 
For more information, see Exactly-once // processing (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues-exactly-once-processing.html) - // in the Amazon Simple Queue Service Developer Guide. + // in the Amazon SQS Developer Guide. // // * Every message must have a unique MessageDeduplicationId, You may provide // a MessageDeduplicationId explicitly. If you aren't able to provide a MessageDeduplicationId @@ -4726,7 +4729,7 @@ type SendMessageInput struct { // // For best practices of using MessageDeduplicationId, see Using the MessageDeduplicationId // Property (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/using-messagededuplicationid-property.html) - // in the Amazon Simple Queue Service Developer Guide. + // in the Amazon SQS Developer Guide. MessageDeduplicationId *string `type:"string"` // This parameter applies only to FIFO (first-in-first-out) queues. @@ -4751,7 +4754,7 @@ type SendMessageInput struct { // // For best practices of using MessageGroupId, see Using the MessageGroupId // Property (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/using-messagegroupid-property.html) - // in the Amazon Simple Queue Service Developer Guide. + // in the Amazon SQS Developer Guide. // // MessageGroupId is required for FIFO queues. You can't use it for Standard // queues. @@ -4761,8 +4764,8 @@ type SendMessageInput struct { // of a Name, Type, and Value. // // * Currently, the only supported message system attribute is AWSTraceHeader. - // Its type must be String and its value must be a correctly formatted AWS - // X-Ray trace header string. + // Its type must be String and its value must be a correctly formatted X-Ray + // trace header string. // // * The size of a message system attribute doesn't count towards the total // size of a message. @@ -4887,7 +4890,7 @@ type SendMessageOutput struct { // An attribute containing the MessageId of the message sent to the queue. 
For // more information, see Queue and Message Identifiers (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-queue-message-identifiers.html) - // in the Amazon Simple Queue Service Developer Guide. + // in the Amazon SQS Developer Guide. MessageId *string `type:"string"` // This parameter applies only to FIFO (first-in-first-out) queues. @@ -4959,9 +4962,10 @@ type SetQueueAttributesInput struct { // Amazon SQS retains a message. Valid values: An integer representing seconds, // from 60 (1 minute) to 1,209,600 (14 days). Default: 345,600 (4 days). // - // * Policy – The queue's policy. A valid AWS policy. For more information - // about policy structure, see Overview of AWS IAM Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/PoliciesOverview.html) - // in the Amazon IAM User Guide. + // * Policy – The queue's policy. A valid Amazon Web Services policy. For + // more information about policy structure, see Overview of Amazon Web Services + // IAM Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/PoliciesOverview.html) + // in the Identity and Access Management User Guide. // // * ReceiveMessageWaitTimeSeconds – The length of time, in seconds, for // which a ReceiveMessage action waits for a message to arrive. Valid values: @@ -4971,9 +4975,9 @@ type SetQueueAttributesInput struct { // queue functionality of the source queue as a JSON object. For more information // about the redrive policy and dead-letter queues, see Using Amazon SQS // Dead-Letter Queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-dead-letter-queues.html) - // in the Amazon Simple Queue Service Developer Guide. deadLetterTargetArn - // – The Amazon Resource Name (ARN) of the dead-letter queue to which Amazon - // SQS moves messages after the value of maxReceiveCount is exceeded. maxReceiveCount + // in the Amazon SQS Developer Guide. 
deadLetterTargetArn – The Amazon + // Resource Name (ARN) of the dead-letter queue to which Amazon SQS moves + // messages after the value of maxReceiveCount is exceeded. maxReceiveCount // – The number of times a message is delivered to the source queue before // being moved to the dead-letter queue. When the ReceiveCount for a message // exceeds the maxReceiveCount for a queue, Amazon SQS moves the message @@ -4985,36 +4989,37 @@ type SetQueueAttributesInput struct { // Valid values: An integer from 0 to 43,200 (12 hours). Default: 30. For // more information about the visibility timeout, see Visibility Timeout // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-visibility-timeout.html) - // in the Amazon Simple Queue Service Developer Guide. + // in the Amazon SQS Developer Guide. // // The following attributes apply only to server-side-encryption (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html): // - // * KmsMasterKeyId – The ID of an AWS-managed customer master key (CMK) - // for Amazon SQS or a custom CMK. For more information, see Key Terms (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html#sqs-sse-key-terms). + // * KmsMasterKeyId – The ID of an Amazon Web Services managed customer + // master key (CMK) for Amazon SQS or a custom CMK. For more information, + // see Key Terms (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html#sqs-sse-key-terms). // While the alias of the AWS-managed CMK for Amazon SQS is always alias/aws/sqs, // the alias of a custom CMK can, for example, be alias/MyAlias . For more // examples, see KeyId (https://docs.aws.amazon.com/kms/latest/APIReference/API_DescribeKey.html#API_DescribeKey_RequestParameters) - // in the AWS Key Management Service API Reference. + // in the Key Management Service API Reference. 
// // * KmsDataKeyReusePeriodSeconds – The length of time, in seconds, for // which Amazon SQS can reuse a data key (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#data-keys) - // to encrypt or decrypt messages before calling AWS KMS again. An integer - // representing seconds, between 60 seconds (1 minute) and 86,400 seconds - // (24 hours). Default: 300 (5 minutes). A shorter time period provides better - // security but results in more calls to KMS which might incur charges after - // Free Tier. For more information, see How Does the Data Key Reuse Period - // Work? (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html#sqs-how-does-the-data-key-reuse-period-work). + // to encrypt or decrypt messages before calling KMS again. An integer representing + // seconds, between 60 seconds (1 minute) and 86,400 seconds (24 hours). + // Default: 300 (5 minutes). A shorter time period provides better security + // but results in more calls to KMS which might incur charges after Free + // Tier. For more information, see How Does the Data Key Reuse Period Work? + // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html#sqs-how-does-the-data-key-reuse-period-work). // // The following attribute applies only to FIFO (first-in-first-out) queues // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html): // // * ContentBasedDeduplication – Enables content-based deduplication. For // more information, see Exactly-once processing (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues-exactly-once-processing.html) - // in the Amazon Simple Queue Service Developer Guide. Note the following: - // Every message must have a unique MessageDeduplicationId. You may provide - // a MessageDeduplicationId explicitly. 
If you aren't able to provide a MessageDeduplicationId - // and you enable ContentBasedDeduplication for your queue, Amazon SQS uses - // a SHA-256 hash to generate the MessageDeduplicationId using the body of + // in the Amazon SQS Developer Guide. Note the following: Every message must + // have a unique MessageDeduplicationId. You may provide a MessageDeduplicationId + // explicitly. If you aren't able to provide a MessageDeduplicationId and + // you enable ContentBasedDeduplication for your queue, Amazon SQS uses a + // SHA-256 hash to generate the MessageDeduplicationId using the body of // the message (but not the attributes of the message). If you don't provide // a MessageDeduplicationId and the queue doesn't have ContentBasedDeduplication // set, the action fails with an error. If the queue has ContentBasedDeduplication @@ -5048,7 +5053,7 @@ type SetQueueAttributesInput struct { // as specified. // // For information on throughput quotas, see Quotas related to messages (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/quotas-messages.html) - // in the Amazon Simple Queue Service Developer Guide. + // in the Amazon SQS Developer Guide. // // Attributes is a required field Attributes map[string]*string `locationName:"Attribute" locationNameKey:"Name" locationNameValue:"Value" type:"map" flattened:"true" required:"true"` diff --git a/service/sqs/doc.go b/service/sqs/doc.go index 854208bcc67..57d7718cd14 100644 --- a/service/sqs/doc.go +++ b/service/sqs/doc.go @@ -3,20 +3,19 @@ // Package sqs provides the client and types for making API // requests to Amazon Simple Queue Service. // -// Welcome to the Amazon Simple Queue Service API Reference. +// Welcome to the Amazon SQS API Reference. // -// Amazon Simple Queue Service (Amazon SQS) is a reliable, highly-scalable hosted -// queue for storing messages as they travel between applications or microservices. 
-// Amazon SQS moves data between distributed application components and helps -// you decouple these components. +// Amazon SQS is a reliable, highly-scalable hosted queue for storing messages +// as they travel between applications or microservices. Amazon SQS moves data +// between distributed application components and helps you decouple these components. // // For information on the permissions you need to use this API, see Identity // and access management (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-authentication-and-access-control.html) -// in the Amazon Simple Queue Service Developer Guide. +// in the Amazon SQS Developer Guide. // -// You can use AWS SDKs (http://aws.amazon.com/tools/#sdk) to access Amazon -// SQS using your favorite programming language. The SDKs perform tasks such -// as the following automatically: +// You can use Amazon Web Services SDKs (http://aws.amazon.com/tools/#sdk) to +// access Amazon SQS using your favorite programming language. 
The SDKs perform +// tasks such as the following automatically: // // * Cryptographically sign your service requests // @@ -28,11 +27,11 @@ // // * Amazon SQS Product Page (http://aws.amazon.com/sqs/) // -// * Amazon Simple Queue Service Developer Guide Making API Requests (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-making-api-requests.html) +// * Amazon SQS Developer Guide Making API Requests (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-making-api-requests.html) // Amazon SQS Message Attributes (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-message-metadata.html#sqs-message-attributes) // Amazon SQS Dead-Letter Queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-dead-letter-queues.html) // -// * Amazon SQS in the AWS CLI Command Reference (http://docs.aws.amazon.com/cli/latest/reference/sqs/index.html) +// * Amazon SQS in the Command Line Interface (http://docs.aws.amazon.com/cli/latest/reference/sqs/index.html) // // * Amazon Web Services General Reference Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#sqs_region) //